[
  {
    "path": ".github/FUNDING.yml",
    "content": "# These are supported funding model platforms\n\ngithub: buraksezer\npatreon: # not used anymore\nopen_collective: # Replace with a single Open Collective username\nko_fi: # Replace with a single Ko-fi username\ntidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel\ncommunity_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry\nliberapay: # Replace with a single Liberapay username\nissuehunt: # Replace with a single IssueHunt username\notechie: # Replace with a single Otechie username\ncustom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']\n"
  },
  {
    "path": ".github/workflows/ci.yml",
    "content": "name: Unit & Integration tests\n\non:\n  push:\n    branches: [ \"master\" ]\n  pull_request:\n    branches: [ \"master\" ]\n\njobs:\n  test:\n    strategy:\n      # Default is true, cancels jobs for other platforms in the matrix if one fails\n      fail-fast: false\n      matrix:\n        os: [ ubuntu-latest ]\n        go: [ '1.23', '1.24' ]\n\n    runs-on: ${{ matrix.os }}\n\n    steps:\n    - name: Install Go\n      uses: actions/setup-go@v4\n      with:\n        go-version: ${{ matrix.go }}\n\n    - name: Checkout code\n      uses: actions/checkout@v3\n\n    - name: Print Go version and environment\n      id: vars\n      run: |\n        printf \"Using go at: $(which go)\\n\"\n        printf \"Go version: $(go version)\\n\"\n        printf \"\\n\\nGo environment:\\n\\n\"\n        go env\n        printf \"\\n\\nSystem environment:\\n\\n\"\n        env\n        # Calculate the short SHA1 hash of the git commit\n        echo \"short_sha=$(git rev-parse --short HEAD)\" >> \"$GITHUB_OUTPUT\"\n        echo \"go_cache=$(go env GOCACHE)\" >> \"$GITHUB_OUTPUT\"\n\n    - name: Cache the build cache\n      uses: actions/cache@v4\n      with:\n        path: ${{ steps.vars.outputs.go_cache }}\n        key: ${{ runner.os }}-${{ matrix.go }}-go-ci-${{ hashFiles('**/go.sum') }}\n        restore-keys: |\n          ${{ runner.os }}-${{ matrix.go }}-go-ci\n\n    - name: Install dependencies\n      run: |\n        go mod download\n\n    - name: Run tests\n      run: make test\n"
  },
  {
    "path": ".github/workflows/codeql-analysis.yml",
    "content": "# For most projects, this workflow file will not need changing; you simply need\n# to commit it to your repository.\n#\n# You may wish to alter this file to override the set of languages analyzed,\n# or to provide custom queries or build logic.\n#\n# ******** NOTE ********\n# We have attempted to detect the languages in your repository. Please check\n# the `language` matrix defined below to confirm you have the correct set of\n# supported CodeQL languages.\n#\nname: \"CodeQL\"\n\non:\n  push:\n    branches: [ master ]\n  pull_request:\n    # The branches below must be a subset of the branches above\n    branches: [ master ]\n  schedule:\n    - cron: '28 20 * * 0'\n\njobs:\n  analyze:\n    name: Analyze\n    runs-on: ubuntu-latest\n    permissions:\n      actions: read\n      contents: read\n      security-events: write\n\n    strategy:\n      fail-fast: false\n      matrix:\n        language: [ 'go' ]\n        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]\n        # Learn more about CodeQL language support at https://git.io/codeql-language-support\n\n    steps:\n    - name: Checkout repository\n      uses: actions/checkout@v2\n\n    # Initializes the CodeQL tools for scanning.\n    - name: Initialize CodeQL\n      uses: github/codeql-action/init@v3\n      with:\n        languages: ${{ matrix.language }}\n        # If you wish to specify custom queries, you can do so here or in a config file.\n        # By default, queries listed here will override any specified in a config file.\n        # Prefix the list here with \"+\" to use these queries and those in the config file.\n        # queries: ./path/to/local/query, your-org/your-repo/queries@main\n\n    # Autobuild attempts to build any compiled languages  (C/C++, C#, or Java).\n    # If this step fails, then you should remove it and run the build manually (see below)\n    - name: Autobuild\n      uses: github/codeql-action/autobuild@v3\n\n    # ℹ️ Command-line 
programs to run using the OS shell.\n    # 📚 https://git.io/JvXDl\n\n    # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines\n    #    and modify them (or add more) to build your code if your project\n    #    uses a compiled language\n\n    #- run: |\n    #   make bootstrap\n    #   make release\n\n    - name: Perform CodeQL Analysis\n      uses: github/codeql-action/analyze@v3\n"
  },
  {
    "path": ".github/workflows/golangci-lint.yml",
    "content": "name: golangci-lint\non:\n  push:\n    branches:\n      - master\n  pull_request:\n\npermissions:\n  contents: read\n  # Optional: allow read access to pull request. Use with `only-new-issues` option.\n  # pull-requests: read\n\njobs:\n  golangci:\n    name: lint\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v4\n      - uses: actions/setup-go@v5\n        with:\n          go-version: stable\n      - name: golangci-lint\n        uses: golangci/golangci-lint-action@v7\n        with:\n          version: v2.0"
  },
  {
    "path": ".gitignore",
    "content": "# Binaries for programs and plugins\n*.exe\n*.dll\n*.so\n*.dylib\n\n# Vim creates this\n*.swp\n\n# Test binary, build with `go test -c`\n*.test\n\n# Output of the go coverage tool, specifically when used with LiteIDE\n*.out\n\n# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736\n.glide/\n\n# GoLand creates this\n.idea/\n\n# OSX creates this\n.DS_Store\n\n.claude/\nCLAUDE.md\n"
  },
  {
    "path": "Dockerfile",
    "content": "FROM golang:latest AS build\nWORKDIR /src/\nCOPY . /src/\nRUN go mod download\nRUN CGO_ENABLED=1 go build -ldflags=\"-s -w\" -o /usr/bin/olric-server /src/cmd/olric-server\n\nFROM gcr.io/distroless/base-debian12\nCOPY --from=build /usr/bin/olric-server /usr/bin/olric-server\nCOPY --from=build /src/olric-server-docker.yaml /etc/olric-server.yaml\n\nEXPOSE 3320 3322\nENTRYPOINT [\"/usr/bin/olric-server\", \"-c\", \"/etc/olric-server.yaml\"]\n"
  },
  {
    "path": "LICENSE",
    "content": "\n                                 Apache License\n                           Version 2.0, January 2004\n                        https://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. 
However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   Copyright 2018-2025 The Olric Authors\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       https://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "Makefile",
    "content": ".PHONY: test\ntest:\n\tgo test -p 1 ./...\n\n.PHONY: test-quick\ntest-quick:\n\tgo test -p 1 -count=1 ./...\n\n.PHONY: test-race\ntest-race:\n\tgo test -p 1 -race ./...\n\n.PHONY: format\nformat:\n\tgo fmt ./...\n\n.PHONY: prepare-merge\nprepare-merge: format test\n\n.PHONY: ci\nci: test\n\n.PHONY: ci-quick\nci-quick: test-quick\n\n.PHONY: install\ninstall:\n\tgo install -ldflags=\"-s -w\" -v ./cmd/*"
  },
  {
    "path": "README.md",
    "content": "# Olric [![Tweet](https://img.shields.io/twitter/url/http/shields.io.svg?style=social)](https://twitter.com/intent/tweet?text=Olric%3A+Distributed+and+in-memory+key%2Fvalue+database.+It+can+be+used+both+as+an+embedded+Go+library+and+as+a+language-independent+service.+&url=https://github.com/olric-data/olric/&hashtags=golang,distributed,database)\n\n[![Go Reference](https://pkg.go.dev/badge/github.com/olric-data/olric/.svg)](https://pkg.go.dev/github.com/olric-data/olric/) [![Go Report Card](https://goreportcard.com/badge/olric-data/olric)](https://goreportcard.com/report/github.com/olric-data/olric/) [![Discord](https://img.shields.io/discord/721708998021087273.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/ahK7Vjr8We) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\n\nDistributed In-Memory Cache & Key/Value Store\n\nOlric provides a simple way to create a **fast, scalable, and shared pool of RAM** across a cluster of machines. \nIt's a distributed, in-memory key/value store and cache, written entirely in Go and designed specifically for distributed environments.\n\n**Flexible Deployment:**\n\n* **Embedded Go Library:** Integrate Olric directly into your Go applications.\n* **Standalone Service:** Run Olric as a language-independent service.\n\n**Key Features:**\n\n* **Effortless Scalability:** Designed to handle hundreds of members and thousands of clients. New nodes auto-discover the cluster and linearly increase capacity.\n* **Automatic Distribution:** Provides partitioning (sharding) and data re-balancing out-of-the-box, requiring no external coordination services. 
Data and backups are automatically balanced when capacity is added.\n* **Wide Client Support:** Uses the standard **Redis Serialization Protocol (RESP)**, ensuring client libraries are available in nearly all major programming languages.\n* **Common Use Cases:** Ideal for distributed caching, managing application cluster state, and implementing publish-subscribe messaging.\n\nSee [Docker](#docker) and [Samples](#samples) sections to get started! \n\nJoin our [Discord server!](https://discord.gg/ahK7Vjr8We)\n\nThe current production version is [v0.7.0](https://github.com/olric-data/olric/tree/release/v0.7)\n\n### About renaming the module\n\n`github.com/buraksezer/olric` module has been renamed to `github.com/olric-data/olric`. This change has been effective since **v0.6.0**.\nImporting previous versions should redirect you to the new repository, but you should change the import paths in your codebase as soon as possible.\n\nThere is no other difference between v0.5.7 and v0.6.0.\n\n## At a glance\n\n* Designed to share some transient, approximate, fast-changing data between servers,\n* Uses Redis serialization protocol,\n* Implements a distributed hash table,\n* Provides a drop-in replacement for Redis Publish/Subscribe messaging system,\n* Supports both programmatic and declarative configuration, \n* Embeddable but can be used as a language-independent service with *olric-server*,\n* Supports different eviction algorithms (including LRU and TTL),\n* Highly available and horizontally scalable,\n* Provides best-effort consistency guarantees without being a complete CP (indeed PA/EC) solution,\n* Supports replication by default (with sync and async options),\n* Quorum-based voting for replica control (Read/Write quorums),\n* Supports atomic operations,\n* Provides an iterator on distributed maps,\n* Provides a plugin interface for service discovery daemons,\n* Provides a locking primitive which inspired by [SETNX of 
Redis](https://redis.io/commands/setnx#design-pattern-locking-with-codesetnxcode),\n\n## Possible Use Cases\n\nOlric is an eventually consistent, unordered key/value data store. It supports various eviction mechanisms for distributed caching implementations. Olric \nalso provides publish-subscribe messaging, data replication, failure detection and simple anti-entropy services. \n\nIt's good at distributed caching and publish/subscribe messaging.\n\n## Table of Contents\n\n* [Features](#features)\n* [Support](#support)\n* [Installing](#installing)\n  * [Docker](#docker)\n* [Getting Started](#getting-started)\n  * [Operation Modes](#operation-modes)\n    * [Embedded Member](#embedded-member)\n    * [Client-Server](#client-server)\n* [Golang Client](#golang-client)\n* [Cluster Events](#cluster-events)\n* [Authentication](#authentication)\n* [Commands](#commands)\n  * [Distributed Map](#distributed-map)\n    * [DM.PUT](#dmput)\n    * [DM.GET](#dmget)\n    * [DM.DEL](#dmdel)\n    * [DM.EXPIRE](#dmexpire)\n    * [DM.PEXPIRE](#dmpexpire)\n    * [DM.DESTROY](#dmdestroy)\n    * [Atomic Operations](#atomic-operations)\n      * [DM.INCR](#dmincr)\n      * [DM.DECR](#dmdecr)\n      * [DM.GETPUT](#dmgetput)\n      * [DM.INCRBYFLOAT](#dmincrbyfloat)\n    * [Locking](#locking)\n      * [DM.LOCK](#dmlock)\n      * [DM.UNLOCK](#dmunlock)\n      * [DM.LOCKLEASE](#dmlocklease)\n      * [DM.PLOCKLEASE](#dmplocklease)\n    * [DM.SCAN](#dmscan)\n  * [Publish-Subscribe](#publish-subscribe)\n    * [SUBSCRIBE](#subscribe)\n    * [PSUBSCRIBE](#psubscribe)\n    * [UNSUBSCRIBE](#unsubscribe)\n    * [PUNSUBSCRIBE](#punsubscribe)\n    * [PUBSUB CHANNELS](#pubsub-channels)\n    * [PUBSUB NUMPAT](#pubsub-numpat)\n    * [PUBSUB NUMSUB](#pubsub-numsub)\n    * [QUIT](#quit)\n    * [PING](#ping)\n  * [Cluster](#cluster)\n    * [CLUSTER.ROUTINGTABLE](#clusterroutingtable)\n    * [CLUSTER.MEMBERS](#clustermembers)\n  * [Others](#others)\n    * [PING](#ping)\n    * [STATS](#stats)\n    * 
[AUTH](#auth)\n* [Configuration](#configuration)\n    * [Embedded Member Mode](#embedded-member-mode)\n      * [Manage the configuration in YAML format](#manage-the-configuration-in-yaml-format)\n    * [Client-Server Mode](#client-server-mode)\n    * [Network Configuration](#network-configuration)\n    * [Service discovery](#service-discovery)\n    * [Timeouts](#timeouts)\n* [Architecture](#architecture)\n  * [Overview](#overview)\n  * [Consistency and Replication Model](#consistency-and-replication-model)\n    * [Last-write-wins conflict resolution](#last-write-wins-conflict-resolution)\n    * [PACELC Theorem](#pacelc-theorem)\n    * [Read-Repair on DMaps](#read-repair-on-dmaps)\n    * [Quorum-based Replica Control](#quorum-based-replica-control)\n    * [Simple Split-Brain Protection](#simple-split-brain-protection)\n  * [Eviction](#eviction)\n    * [Expire with TTL](#expire-with-ttl)\n    * [Expire with MaxIdleDuration](#expire-with-maxidleduration)\n    * [Expire with LRU](#expire-with-lru)\n  * [Lock Implementation](#lock-implementation)\n  * [Storage Engine](#storage-engine)\n* [Samples](#samples)\n* [Contributions](#contributions)\n* [License](#license)\n* [About the name](#about-the-name)\n\n\n## Features\n\n* Designed to share some transient, approximate, fast-changing data between servers,\n* Accepts arbitrary types as value,\n* Only in-memory,\n* Uses Redis protocol,\n* Compatible with existing Redis clients,\n* Embeddable but can be used as a language-independent service with olric-server,\n* GC-friendly storage engine,\n* O(1) running time for lookups,\n* Supports atomic operations,\n* Provides a lock implementation which can be used for non-critical purposes,\n* Different eviction policies: LRU, MaxIdleDuration and Time-To-Live (TTL),\n* Highly available,\n* Horizontally scalable,\n* Provides best-effort consistency guarantees without being a complete CP (indeed PA/EC) solution,\n* Distributes load fairly among cluster members with a [consistent hash 
function](https://github.com/buraksezer/consistent),\n* Supports replication by default (with sync and async options),\n* Quorum-based voting for replica control,\n* Thread-safe by default,\n* Provides an iterator on distributed maps,\n* Provides a plugin interface for service discovery daemons and cloud providers,\n* Provides a locking primitive which inspired by [SETNX of Redis](https://redis.io/commands/setnx#design-pattern-locking-with-codesetnxcode),\n* Provides a drop-in replacement of Redis' Publish-Subscribe messaging feature.\n\nSee the [Architecture](#architecture) section to see details.\n\n## Support\n\nWe have a few communication channels: \n\n* [Issue Tracker](https://github.com/olric-data/olric/issues)\n* [Discord server](https://discord.gg/ahK7Vjr8We)\n\nYou should know that the issue tracker is only intended for bug reports and feature requests.\n\nSoftware doesn't maintain itself. If you need support on complex topics or request new features, please consider [sponsoring Olric](https://github.com/sponsors/buraksezer).\n\n## Installing\n\nWith a correctly configured Golang environment:\n\n```\ngo install github.com/olric-data/olric/cmd/olric-server@v0.7.0\n```\n\nNow you can start using Olric:\n\n```\nolric-server -c cmd/olric-server/olric-server-local.yaml\n```\n\nSee the [Configuration](#configuration) section to create your cluster properly.\n\n### Docker\n\nYou can launch `olric-server` Docker container by running the following command. \n\n```bash\ndocker pull ghcr.io/olric-data/olric:latest\n``` \n\nThis command will pull olric-server Docker image and run a new Olric Instance. You should know that the container exposes \n`3320` and `3322` ports. 
\n\nNow, you can access an Olric cluster using any Redis client including `redis-cli`:\n\n```bash\nredis-cli -p 3320\n127.0.0.1:3320> DM.PUT my-dmap my-key \"Olric Rocks!\"\nOK\n127.0.0.1:3320> DM.GET my-dmap my-key\n\"Olric Rocks!\"\n127.0.0.1:3320>\n```\n\n## Getting Started\n\nWith olric-server, you can create an Olric cluster with a few commands. This is how to install olric-server:\n\n```bash\ngo install github.com/olric-data/olric/cmd/olric-server@v0.7.0\n```\n\nLet's create a cluster with the following:\n\n```\nolric-server -c <YOUR_CONFIG_FILE_PATH>\n```\n\nYou can find the sample configuration file under `cmd/olric-server/olric-server-local.yaml`. It can perfectly run with single node. \nolric-server also supports `OLRIC_SERVER_CONFIG` environment variable to set configuration. Just like that: \n\n```\nOLRIC_SERVER_CONFIG=<YOUR_CONFIG_FILE_PATH> olric-server\n```\n\nOlric uses [hashicorp/memberlist](https://github.com/hashicorp/memberlist) for failure detection and cluster membership. \nCurrently, there are different ways to discover peers in a cluster. You can use a static list of nodes in your configuration. \nIt's ideal for development and test environments. Olric also supports Consul, Kubernetes and all well-known cloud providers\nfor service discovery. Please take a look at [Service Discovery](#service-discovery) section for further information.\n\nSee [Client-Server](#client-server) section to get more information about this deployment scenario.\n\n#### Maintaining a list of peers manually\n\nBasically, there is a list of nodes under `memberlist` block in the configuration file. In order to create an Olric cluster, \nyou just need to add `Host:Port` pairs of the other nodes. Please note that the `Port` is the memberlist port of the peer.\nIt is `3322` by default. 
\n\n```yaml\nmemberlist:\n  peers:\n    - \"localhost:3322\"\n```\n\nThanks to [hashicorp/memberlist](https://github.com/hashicorp/memberlist), Olric nodes can share the full list of members \nwith each other. So an Olric node can discover the whole cluster by using a single member address.\n\n#### Embedding into your Go application.\n\nSee [Samples](#samples) section to learn how to embed Olric into your existing Golang application.\n\n### Operation Modes\n\nOlric has two different operation modes.\n\n#### Embedded Member\n\nIn Embedded Member Mode, members include both the application and Olric data and services. The advantage of the Embedded\nMember Mode is having a low-latency data access and locality.\n\n#### Client-Server\n\nIn Client-Server Mode, Olric data and services are centralized in one or more servers, and they are accessed by the \napplication through clients. You can have a cluster of servers that can be independently created and scaled. Your clients \ncommunicate with these members to reach to Olric data and services on them.\n\nClient-Server deployment has advantages including more predictable and reliable performance, easier identification\nof problem causes and, most importantly, better scalability. When you need to scale in this deployment type, just add more\nOlric server members. You can address client and server scalability concerns separately.\n\n## Golang Client\n\nThe official Golang client is defined by the `Client` interface. There are two different implementations of that interface in \nthis repository. `EmbeddedClient` provides a client implementation for [embedded-member](#embedded-member) scenario, \n`ClusterClient` provides an implementation of the same interface for [client-server](#client-server) deployment scenario. \nObviously, you can use `ClusterClient` for your embedded-member deployments. 
But it's good to use `EmbeddedClient` as it provides \nbetter performance due to localization of the queries.\n\nSee the client documentation on [pkg.go.dev](https://pkg.go.dev/github.com/olric-data/olric/@v0.7.0)\n\n## Cluster Events\n\nOlric can send push cluster events to `cluster.events` channel. Available cluster events:\n\n* node-join-event\n* node-left-event\n* fragment-migration-event\n* fragment-received-event\n\nIf you want to receive these events, set `EnableClusterEventsChannel` to `true` and subscribe to `cluster.events` channel. \nThe default is `false`.\n\nSee the [events/cluster_events.go](events/cluster_events.go) file to get more information about events.\n\n## Authentication\n\nOlric supports simple password-based authentication to restrict access to the data store. This mechanism is similar to the \n`requirepass` directive in Redis and is intended to provide a basic level of protection in trusted environments (e.g., \ninternal networks or local development).\n\n> **Important**: This authentication method **does not provide transport-layer encryption or full access control**. 
For secure\n> deployments over untrusted networks (e.g., Internet), it's strongly recommended to place Olric behind a reverse proxy with TLS \n> support or use a secure network overlay (e.g., WireGuard, VPN).\n\n### YAML-based Configuration\n\nYou can enable password-based authentication by adding the `authentication` block to your configuration file:\n\n```yaml\nauthentication:\n  password: \"your-password\"\n```\n\nWhen this is set, all clients must authenticate using the provided password before performing any operations.\n\n### Programmatic Configuration (Go API)\n\nFor applications embedding Olric or configuring it dynamically in Go, you can enable authentication as follows:\n\n```go\nc := config.New(\"local\")\nc.Authentication = &config.Authentication{\n    Password: \"your-password\",\n}\n```\n\nThis sets the password required for any client to interact with the Olric node.\n\n### Client-Side Usage\n\nClients must send the password using the [AUTH](#auth) command. If the password is incorrect or not provided, the connection will \nbe denied or commands will be rejected.\n\nWith the cluster client, you can use `WithPassword` cluster client option.\n\n```go\nclient, err := NewClusterClient([]string{db.name}, WithPassword(\"test-password\"))\n```\n\n**Important:** The embedded client has not been covered by the authentication implementation.\n\n## Commands\n\nOlric uses Redis protocol and supports Redis-style commands to query the database. You can use any Redis client, including\n`redis-cli`. The official Go client is a thin layer around [go-redis/redis](https://github.com/go-redis/redis) package. \nSee [Golang Client](#golang-client) section for the documentation.\n\n### Distributed Map\n\n#### DM.PUT \n\nDM.PUT sets the value for the given key. 
It overwrites any previous value for that key.\n\n```\nDM.PUT dmap key value [ EX seconds | PX milliseconds | EXAT unix-time-seconds | PXAT unix-time-milliseconds ] [ NX | XX]\n```\n\n**Example:**\n```\n127.0.0.1:3320> DM.PUT my-dmap my-key value\nOK\n```\n\n**Options:**\n\nThe DM.PUT command supports a set of options that modify its behavior:\n\n* **EX** *seconds* -- Set the specified expire time, in seconds.\n* **PX** *milliseconds* -- Set the specified expire time, in milliseconds.\n* **EXAT** *timestamp-seconds* -- Set the specified Unix time at which the key will expire, in seconds.\n* **PXAT** *timestamp-milliseconds* -- Set the specified Unix time at which the key will expire, in milliseconds.\n* **NX** -- Only set the key if it does not already exist.\n* **XX** -- Only set the key if it already exist.\n\n**Return:**\n\n* **Simple string reply:** OK if DM.PUT was executed correctly.\n* **KEYFOUND:** (error) if the DM.PUT operation was not performed because the user specified the NX option but the condition was not met.\n* **KEYNOTFOUND:** (error) if the DM.PUT operation was not performed because the user specified the XX option but the condition was not met.\n\n#### DM.GET\n\nDM.GET gets the value for the given key. It returns (error)`KEYNOTFOUND` if the key doesn't exist. \n\n```\nDM.GET dmap key\n```\n\n**Example:**\n\n```\n127.0.0.1:3320> DM.GET dmap key\n\"value\"\n```\n\n**Return:**\n\n**Bulk string reply**: the value of key, or (error)`KEYNOTFOUND` when key does not exist.\n\n#### DM.DEL\n\nDM.DEL deletes values for the given keys. It doesn't return any error if the key does not exist.\n\n```\nDM.DEL dmap key [key...]\n```\n\n**Example:**\n\n```\n127.0.0.1:3320> DM.DEL dmap key1 key2\n(integer) 2\n```\n\n**Return:**\n\n* **Integer reply**: The number of keys that were removed.\n\n#### DM.EXPIRE\n\nDM.EXPIRE updates or sets the timeout for the given key. It returns `KEYNOTFOUND` if the key doesn't exist. 
After the timeout has expired, \nthe key will automatically be deleted. \n\nThe timeout will only be cleared by commands that delete or overwrite the contents of the key, including DM.DEL, DM.PUT, DM.GETPUT.\n\n```\nDM.EXPIRE dmap key seconds\n```\n\n**Example:**\n\n```\n127.0.0.1:3320> DM.EXPIRE dmap key 1\nOK\n```\n\n**Return:**\n\n* **Simple string reply:** OK if DM.EXPIRE was executed correctly.\n* **KEYNOTFOUND:** (error) when key does not exist.\n\n#### DM.PEXPIRE\n\nDM.PEXPIRE updates or sets the timeout for the given key. It returns `KEYNOTFOUND` if the key doesn't exist. After the timeout has expired,\nthe key will automatically be deleted.\n\nThe timeout will only be cleared by commands that delete or overwrite the contents of the key, including DM.DEL, DM.PUT, DM.GETPUT.\n\n```\nDM.PEXPIRE dmap key milliseconds\n```\n\n**Example:**\n\n```\n127.0.0.1:3320> DM.PEXPIRE dmap key 1000\nOK\n```\n\n**Return:**\n\n* **Simple string reply:** OK if DM.PEXPIRE was executed correctly.\n* **KEYNOTFOUND:** (error) when key does not exist.\n\n#### DM.DESTROY\n\nDM.DESTROY flushes the given DMap on the cluster. You should know that there is no global lock on DMaps. DM.PUT and DM.DESTROY commands\nmay run concurrently on the same DMap. \n\n```\nDM.DESTROY dmap\n```\n\n**Example:**\n\n```\n127.0.0.1:3320> DM.DESTROY dmap\nOK\n```\n\n**Return:**\n\n* **Simple string reply:** OK, if DM.DESTROY was executed correctly.\n\n### Atomic Operations\n\nOperations on key/value pairs are performed by the partition owner. In addition, atomic operations are guarded by a lock implementation which can be found under `internal/locker`. It means that\nOlric guarantees consistency of atomic operations, if there is no network partition. 
Basic flow for `DM.INCR`:\n\n* Acquire the lock for the given key,\n* Call `DM.GET` to retrieve the current value,\n* Calculate the new value,\n* Call `DM.PUT` to set the new value,\n* Release the lock.\n\nIt's important to know that if you call `DM.PUT` and `DM.GETPUT` concurrently on the same key, this will break the atomicity.\n\n`internal/locker` package is provided by [Docker](https://github.com/moby/moby).\n\n**Important note about consistency:**\n\nYou should know that Olric is a PA/EC (see [Consistency and Replication Model](#consistency-and-replication-model)) product. So if your network is stable, all the operations on key/value\npairs are performed by a single cluster member. It means that you can be sure about the consistency when the cluster is stable. It's important to know that computer networks fail\noccasionally, processes crash and random GC pauses may happen. Many factors can lead a network partitioning. If you cannot tolerate losing strong consistency under network partitioning,\nyou need to use a different tool for atomic operations.\n\nSee [Hazelcast and the Mythical PA/EC System](https://dbmsmusings.blogspot.com/2017/10/hazelcast-and-mythical-paec-system.html) and [Jepsen Analysis on Hazelcast 3.8.3](https://hazelcast.com/blog/jepsen-analysis-hazelcast-3-8-3/) for more insight on this topic.\n\n\n#### DM.INCR\n\nDM.INCR atomically increments the number stored at key by delta. The return value is the new value after being incremented or an error.\n\n```\nDM.INCR dmap key delta\n```\n\n**Example:**\n\n```\n127.0.0.1:3320> DM.INCR dmap key 10\n(integer) 10\n```\n\n**Return:**\n\n* **Integer reply:** the value of key after the increment.\n\n#### DM.DECR\n\nDM.DECR atomically decrements the number stored at key by delta. 
The return value is the new value after being decremented or an error.\n\n```\nDM.DECR dmap key delta\n```\n\n**Example:**\n\n```\n127.0.0.1:3320> DM.DECR dmap key 10\n(integer) 0\n```\n\n**Return:**\n\n* **Integer reply:** the value of key after the decrement.\n\n#### DM.GETPUT\n\nDM.GETPUT atomically sets key to value and returns the old value stored at the key.\n\n```\nDM.GETPUT dmap key value\n```\n\n**Example:**\n\n```\n127.0.0.1:3320> DM.GETPUT dmap key value-1\n(nil)\n127.0.0.1:3320> DM.GETPUT dmap key value-2\n\"value-1\"\n```\n\n**Return:**\n\n* **Bulk string reply**: the old value stored at the key.\n\n#### DM.INCRBYFLOAT\n\nDM.INCRBYFLOAT atomically increments the number stored at key by delta. The return value is the new value after being incremented or an error.\n\n```\nDM.INCRBYFLOAT dmap key delta\n```\n\n**Example:**\n\n```\n127.0.0.1:3320> DM.PUT dmap key 10.50\nOK\n127.0.0.1:3320> DM.INCRBYFLOAT dmap key 0.1\n\"10.6\"\n127.0.0.1:3320> DM.PUT dmap key 5.0e3\nOK\n127.0.0.1:3320> DM.INCRBYFLOAT dmap key 2.0e2\n\"5200\"\n```\n\n**Return:**\n\n* **Bulk string reply**: the value of key after the increment.\n\n\n### Locking\n\n**Important:** The lock provided by DMap implementation is approximate and only to be used for non-critical purposes.\n\nThe DMap implementation is already thread-safe to meet your thread safety requirements. When you want to have more control on the\nconcurrency, you can use **DM.LOCK** command. Olric borrows the locking algorithm from Redis. 
Redis authors propose\nthe following algorithm:\n\n> The command <SET resource-name anystring NX EX max-lock-time> is a simple way to implement a locking system with Redis.\n>\n> A client can acquire the lock if the above command returns OK (or retry after some time if the command returns Nil), and remove the lock just using DEL.\n>\n> The lock will be auto-released after the expire time is reached.\n>\n> It is possible to make this system more robust modifying the unlock schema as follows:\n>\n> Instead of setting a fixed string, set a non-guessable large random string, called token.\n> Instead of releasing the lock with DEL, send a script that only removes the key if the value matches.\n> This avoids that a client will try to release the lock after the expire time deleting the key created by another client that acquired the lock later.\n\nEquivalent of `SETNX` command in Olric is `DM.PUT dmap key value NX`. The DM.LOCK command properly implements\nthe algorithm which is proposed above.\n\nYou should know that this implementation is subject to the clustering algorithm. So there is no guarantee about reliability in the case of network partitioning. I recommend the lock implementation to be used for\nefficiency purposes in general, instead of correctness.\n\n**Important note about consistency:**\n\nYou should know that Olric is a PA/EC (see [Consistency and Replication Model](#consistency-and-replication-model)) product. So if your network is stable, all the operations on key/value\npairs are performed by a single cluster member. It means that you can be sure about the consistency when the cluster is stable. It's important to know that computer networks fail\noccasionally, processes crash and random GC pauses may happen. Many factors can lead to a network partitioning. 
If you cannot tolerate losing strong consistency under network partitioning,\nyou need to use a different tool for locking.\n\nSee [Hazelcast and the Mythical PA/EC System](https://dbmsmusings.blogspot.com/2017/10/hazelcast-and-mythical-paec-system.html) and [Jepsen Analysis on Hazelcast 3.8.3](https://hazelcast.com/blog/jepsen-analysis-hazelcast-3-8-3/) for more insight on this topic.\n\n#### DM.LOCK\n\nDM.LOCK sets a lock for the given key. The acquired lock is only valid for the key in this DMap.\nIt returns immediately if it acquires the lock for the given key. Otherwise, it waits until deadline.\n\nDM.LOCK returns a token. You must keep that token to unlock the key. Using prefixed keys is highly recommended.\nIf the key does already exist in the DMap, DM.LOCK will wait until the deadline is exceeded.\n\n```\nDM.LOCK dmap key seconds [ EX seconds | PX milliseconds ]\n```\n\n**Options:**\n\n* **EX** *seconds* -- Set the specified expire time, in seconds.\n* **PX** *milliseconds* -- Set the specified expire time, in milliseconds.\n\n**Example:**\n\n```\n127.0.0.1:3320> DM.LOCK dmap lock.key 10\n2363ec600be286cb10fbb35181efb029\n```\n\n**Return:**\n\n* **Simple string reply:** a token to unlock or lease the lock.\n* **NOSUCHLOCK**: (error) returned when the requested lock does not exist.\n* **LOCKNOTACQUIRED**: (error) returned when the requested lock could not be acquired.\n\n#### DM.UNLOCK\n\nDM.UNLOCK releases an acquired lock for the given key. It returns `NOSUCHLOCK` if there is no lock for the given key.\n\n```\nDM.UNLOCK dmap key token\n```\n\n**Example:**\n\n```\n127.0.0.1:3320> DM.UNLOCK dmap key 2363ec600be286cb10fbb35181efb029\nOK\n```\n\n**Return:**\n\n* **Simple string reply:** OK if DM.UNLOCK was executed correctly.\n* **NOSUCHLOCK**: (error) returned when the lock does not exist.\n\n#### DM.LOCKLEASE\n\nDM.LOCKLEASE sets or updates the timeout of the acquired lock for the given key. 
It returns `NOSUCHLOCK` if there is no lock for the given key.\n\nDM.LOCKLEASE accepts seconds as timeout.\n\n```\nDM.LOCKLEASE dmap key token seconds\n```\n\n**Example:**\n\n```\n127.0.0.1:3320> DM.LOCKLEASE dmap key 2363ec600be286cb10fbb35181efb029 100\nOK\n```\n\n**Return:**\n\n* **Simple string reply:** OK if DM.LOCKLEASE was executed correctly.\n* **NOSUCHLOCK**: (error) returned when the lock does not exist.\n\n#### DM.PLOCKLEASE\n\nDM.PLOCKLEASE sets or updates the timeout of the acquired lock for the given key. It returns `NOSUCHLOCK` if there is no lock for the given key.\n\nDM.PLOCKLEASE accepts milliseconds as timeout.\n\n```\nDM.PLOCKLEASE dmap key token milliseconds\n```\n\n**Example:**\n\n```\n127.0.0.1:3320> DM.PLOCKLEASE dmap key 2363ec600be286cb10fbb35181efb029 1000\nOK\n```\n\n**Return:**\n\n* **Simple string reply:** OK if DM.PLOCKLEASE was executed correctly.\n* **NOSUCHLOCK**: (error) returned when the lock does not exist.\n\n#### DM.SCAN\n\nDM.SCAN is a cursor based iterator. This means that at every call of the command, the server returns an updated cursor \nthat the user needs to use as the cursor argument in the next call.\n\nAn iteration starts when the cursor is set to 0, and terminates when the cursor returned by the server is 0. The iterator runs\nlocally on every partition. So you need to know the partition count. If the returned cursor is 0 for a particular partition,\nyou have to start scanning the next partition. 
\n\n```\nDM.SCAN partID dmap cursor [ MATCH pattern | COUNT count ]\n```\n\n**Example:**\n\n```\n127.0.0.1:3320> DM.SCAN 3 bench 0\n1) \"96990\"\n2)  1) \"memtier-2794837\"\n    2) \"memtier-8630933\"\n    3) \"memtier-6415429\"\n    4) \"memtier-7808686\"\n    5) \"memtier-3347072\"\n    6) \"memtier-4247791\"\n    7) \"memtier-3931982\"\n    8) \"memtier-7164719\"\n    9) \"memtier-4710441\"\n   10) \"memtier-8892916\"\n127.0.0.1:3320> DM.SCAN 3 bench 96990\n1) \"193499\"\n2)  1) \"memtier-429905\"\n    2) \"memtier-1271812\"\n    3) \"memtier-7835776\"\n    4) \"memtier-2717575\"\n    5) \"memtier-95312\"\n    6) \"memtier-2155214\"\n    7) \"memtier-123931\"\n    8) \"memtier-2902510\"\n    9) \"memtier-2632291\"\n   10) \"memtier-1938450\"\n```\n### Publish-Subscribe\n\n**SUBSCRIBE**, **UNSUBSCRIBE** and **PUBLISH** implement the Publish/Subscribe messaging paradigm where \nsenders are not programmed to send their messages to specific receivers. Rather, published messages are characterized \ninto channels, without knowledge of what (if any) subscribers there may be. Subscribers express interest in one or more \nchannels, and only receive messages that are of interest, without knowledge of what (if any) publishers there are. \nThis decoupling of publishers and subscribers can allow for greater scalability and a more dynamic network topology.\n\n**Important note:** In an Olric cluster, clients can subscribe to every node, and can also publish to every other node. 
The cluster\nwill make sure that published messages are forwarded as needed.\n\n*Source of this section: [https://redis.io/commands/?group=pubsub](https://redis.io/commands/?group=pubsub)*\n\n#### SUBSCRIBE\n\nSubscribes the client to the specified channels.\n\n```\nSUBSCRIBE channel [channel...]\n```\n\nOnce the client enters the subscribed state it is not supposed to issue any other commands, except for additional **SUBSCRIBE**, \n**PSUBSCRIBE**, **UNSUBSCRIBE**, **PUNSUBSCRIBE**, **PING**, and **QUIT** commands.\n\n#### PSUBSCRIBE\n\nSubscribes the client to the given patterns.\n\n```\nPSUBSCRIBE pattern [ pattern ...]\n```\n\nSupported glob-style patterns:\n\n* `h?llo` subscribes to hello, hallo and hxllo\n* `h*llo` subscribes to hllo and heeeello\n* `h[ae]llo` subscribes to hello and hallo, but not hillo\n* Use **\\\\** to escape special characters if you want to match them verbatim.\n\n#### UNSUBSCRIBE\n\nUnsubscribes the client from the given channels, or from all of them if none is given.\n\n```\nUNSUBSCRIBE [channel [channel ...]]\n```\n\nWhen no channels are specified, the client is unsubscribed from all the previously subscribed channels. In this case, \na message for every unsubscribed channel will be sent to the client.\n\n#### PUNSUBSCRIBE\n\nUnsubscribes the client from the given patterns, or from all of them if none is given.\n\n```\nPUNSUBSCRIBE [pattern [pattern ...]]\n```\n\nWhen no patterns are specified, the client is unsubscribed from all the previously subscribed patterns. 
In this case, \na message for every unsubscribed pattern will be sent to the client.\n\n#### PUBSUB CHANNELS\n\nLists the currently active channels.\n\n```\nPUBSUB CHANNELS [pattern]\n```\n\nAn active channel is a Pub/Sub channel with one or more subscribers (excluding clients subscribed to patterns).\n\nIf no pattern is specified, all the channels are listed, otherwise if pattern is specified only channels matching the \nspecified glob-style pattern are listed.\n\n#### PUBSUB NUMPAT\n\nReturns the number of unique patterns that are subscribed to by clients (that are performed using the PSUBSCRIBE command).\n\n```\nPUBSUB NUMPAT\n```\n\nNote that this isn't the count of clients subscribed to patterns, but the total number of unique patterns all the clients are subscribed to.\n\n**Important note**: In an Olric cluster, clients can subscribe to every node, and can also publish to every other node. The cluster \nwill make sure that published messages are forwarded as needed. That said, PUBSUB's replies in a cluster only report information \nfrom the node's Pub/Sub context, rather than the entire cluster.\n\n#### PUBSUB NUMSUB\n\nReturns the number of subscribers (exclusive of clients subscribed to patterns) for the specified channels.\n\n```\nPUBSUB NUMSUB [channel [channel ...]]\n```\nNote that it is valid to call this command without channels. In this case it will just return an empty list.\n\n**Important note**: In an Olric cluster, clients can subscribe to every node, and can also publish to every other node. The cluster \nwill make sure that published messages are forwarded as needed. That said, PUBSUB's replies in a cluster only report information \nfrom the node's Pub/Sub context, rather than the entire cluster.\n\n#### QUIT\n\nAsk the server to close the connection. 
The connection is closed as soon as all pending replies have been written to the client.\n\n```\nQUIT\n```\n### Cluster\n\n#### CLUSTER.ROUTINGTABLE\n\nCLUSTER.ROUTINGTABLE returns the latest view of the routing table. Simply, it's a data structure that maps\npartitions to members.\n\n```\nCLUSTER.ROUTINGTABLE\n```\n\n**Example:**\n\n```\n127.0.0.1:3320> CLUSTER.ROUTINGTABLE\n 1) 1) (integer) 0\n     2) 1) \"127.0.0.1:3320\"\n     3) (empty array)\n  2) 1) (integer) 1\n     2) 1) \"127.0.0.1:3320\"\n     3) (empty array)\n  3) 1) (integer) 2\n     2) 1) \"127.0.0.1:3320\"\n     3) (empty array)\n```\n\nIt returns an array of arrays. \n\n**Fields:**\n\n```\n1) (integer) 0 <- Partition ID\n  2) 1) \"127.0.0.1:3320\" <- Array of the current and previous primary owners\n  3) (empty array) <- Array of backup owners. \n```\n\n#### CLUSTER.MEMBERS\n\nCLUSTER.MEMBERS returns an array of known members by the server.\n\n```\nCLUSTER.MEMBERS\n```\n\n**Example:**\n\n```\n127.0.0.1:3320> CLUSTER.MEMBERS\n1) 1) \"127.0.0.1:3320\"\n   2) (integer) 1652619388427137000\n   3) \"true\"\n```\n\n**Fields:**\n\n```\n1) 1) \"127.0.0.1:3320\" <- Member's name in the cluster\n   2) (integer) 1652619388427137000 <-Member's birthedate\n   3) \"true\" <- Is cluster coordinator (the oldest node)\n```\n\n### Others\n\n#### PING\n\nReturns PONG if no argument is provided, otherwise return a copy of the argument as a bulk. This command is often used to\ntest if a connection is still alive, or to measure latency.\n\n```\nPING\n```\n\n#### STATS\n\nThe STATS command returns information and statistics about the server in JSON format. 
See `stats/stats.go` file.\n\n```\n127.0.0.1:3320> STATS\n<a large string in JSON format>\n```\n\n#### AUTH\n\n`AUTH` authenticates the client using the given password:\n\n```\n127.0.0.1:3320> AUTH your-password\nOK\n```\n\nUnauthenticated clients get `NOAUTH` error:\n\n```\n127.0.0.1:3320> DMAP.PUT dmap key value\n(error) NOAUTH Authentication required.\n```\n\nIf you try to authenticate the client but the server is not configured, Olric returns the following error:\n\n```\n127.0.0.1:3320> AUTH your-password\n(error) ERR AUTH <password> called without any password configured for the default user. Are you sure your configuration is correct?\n```\n\n## Configuration\n\nOlric supports both declarative and programmatic configurations. You can choose one of them depending on your needs.\nYou should feel free to ask any questions about configuration and integration. Please see [Support](#support) section.\n\n### Embedded-Member Mode\n\n#### Programmatic Configuration\nOlric provides a function to generate default configuration to use in embedded-member mode:\n\n```go\nimport \"github.com/olric-data/olric/config\"\n...\nc := config.New(\"local\")\n```\n\nThe `New` function takes a parameter called `env`. It denotes the network environment and consumed by [hashicorp/memberlist](https://github.com/hashicorp/memberlist). \nDefault configuration is good enough for distributed caching scenario. In order to see all configuration parameters, please take a look at [this](https://godoc.org/github.com/olric-data/olric/config).\n\nSee [Sample Code](#sample-code) section for an introduction.\n\n#### Declarative configuration with YAML format\n\nYou can also import configuration from a YAML file by using the `Load` function:\n\n```go\nc, err := config.Load(path/to/olric.yaml)\n```\n\nA sample configuration file in YAML format can be found [here](https://github.com/olric-data/olric/blob/master/cmd/olric-server/olric-server.yaml). 
This may be the most appropriate way to manage the Olric configuration.\n\n\n### Client-Server Mode\n\nOlric provides **olric-server** to implement client-server mode. olric-server gets a YAML file for the configuration. The most basic  functionality of olric-server is that \ntranslating YAML configuration into Olric's configuration struct. A sample `olric-server.yaml` file  is being provided [here](https://github.com/olric-data/olric/blob/master/cmd/olric-server/olric-server.yaml).\n\n### Network Configuration\n\nIn an Olric instance, there are two different TCP servers. One for Olric, and the other one is for memberlist. `BindAddr` is very\ncritical to deploy a healthy Olric node. There are different scenarios:\n\n* You can freely set a domain name or IP address as `BindAddr` for both Olric and memberlist. Olric will resolve and use it to bind.\n* You can freely set `localhost`, `127.0.0.1` or `::1` as `BindAddr` in development environment for both Olric and memberlist.\n* You can freely set `0.0.0.0` as `BindAddr` for both Olric and memberlist. Olric will pick an IP address, if there is any.\n* If you don't set `BindAddr`, hostname will be used, and it will be resolved to get a valid IP address.\n* You can set a network interface by using `Config.Interface` and `Config.MemberlistInterface` fields. Olric will find an appropriate IP address for the given interfaces, if there is any.\n* You can set both `BindAddr` and interface parameters. In this case Olric will ensure that `BindAddr` is available on the given interface.\n\nYou should know that Olric needs a single and stable IP address to function properly. If you don't know the IP address of the host at the deployment time, \nyou can set `BindAddr` as `0.0.0.0`. Olric will very likely to find an IP address for you.\n\n### Service Discovery\n\nOlric provides a service discovery interface which can be used to implement plugins. 
\n\nWe currently have a bunch of service discovery plugins for automatic peer discovery on cloud environments:\n\n* [olric-data/olric-consul-plugin](https://github.com/olric-data/olric-consul-plugin) provides a plugin using Consul.\n* [olric-data/olric-cloud-plugin](https://github.com/olric-data/olric-cloud-plugin) provides a plugin for well-known cloud providers. Including Kubernetes.\n* [justinfx/olric-nats-plugin](https://github.com/justinfx/olric-nats-plugin) provides a plugin using nats.io\n\nIn order to get more info about installation and configuration of the plugins, see their GitHub page. \n\n### Timeouts\n\nOlric nodes supports setting `KeepAlivePeriod` on TCP sockets. \n\n**Server-side:**\n\n##### config.KeepAlivePeriod \n\nKeepAlivePeriod denotes whether the operating system should send keep-alive messages on the connection.\n\n**Client-side:**\n \n##### config.DialTimeout\n\nTimeout for TCP dial. The timeout includes name resolution, if required. When using TCP, and the host in the address \nparameter resolves to multiple IP addresses, the timeout is spread over each consecutive dial, such that each is\ngiven an appropriate fraction of the time to connect.\n\n##### config.ReadTimeout\n\nTimeout for socket reads. If reached, commands will fail with a timeout instead of blocking. Use value -1 for no \ntimeout and 0 for default. The default is config.DefaultReadTimeout\n\n##### config.WriteTimeout\n\nTimeout for socket writes. If reached, commands will fail with a timeout instead of blocking. The default is config.DefaultWriteTimeout\n\n## Architecture\n\n### Overview\n\nOlric uses:\n* [hashicorp/memberlist](https://github.com/hashicorp/memberlist) for cluster membership and failure detection,\n* [buraksezer/consistent](https://github.com/buraksezer/consistent) for consistent hashing and load balancing,\n* [Redis Serialization Protocol](https://github.com/tidwall/redcon) for communication.\n\nOlric distributes data among partitions. 
Every partition is being owned by a cluster member and may have one or more backups for redundancy. \nWhen you read or write a DMap entry, you transparently talk to the partition owner. Each request hits the most up-to-date version of a\nparticular data entry in a stable cluster.\n\nIn order to find the partition which the key belongs to, Olric hashes the key and mod it with the number of partitions:\n\n```\npartID = MOD(hash result, partition count)\n```\n\nThe partitions are being distributed among cluster members by using a consistent hashing algorithm. In order to get details, please see [buraksezer/consistent](https://github.com/buraksezer/consistent). \n\nWhen a new cluster is created, one of the instances is elected as the **cluster coordinator**. It manages the partition table: \n\n* When a node joins or leaves, it distributes the partitions and their backups among the members again,\n* Removes empty previous owners from the partition owners list,\n* Pushes the new partition table to all the members,\n* Pushes the partition table to the cluster periodically.\n\nMembers propagate their birthdate(POSIX time in nanoseconds) to the cluster. The coordinator is the oldest member in the cluster.\nIf the coordinator leaves the cluster, the second oldest member gets elected as the coordinator.\n\nOlric has a component called **rebalancer** which is responsible for keeping underlying data structures consistent:\n\n* Works on every node,\n* When a node joins or leaves, the cluster coordinator pushes the new partition table. Then, the **rebalancer** runs immediately and moves the partitions and backups to their new hosts,\n* Merges fragmented partitions.\n\nPartitions have a concept called **owners list**. When a node joins or leaves the cluster, a new primary owner may be assigned by the \ncoordinator. At any time, a partition may have one or more partition owners. If a partition has two or more owners, this is called **fragmented partition**. 
\nThe last added owner is called **primary owner**. Write operation is only done by the primary owner. The previous owners are only used for read and delete.\n\nWhen you read a key, the primary owner tries to find the key on itself, first. Then, queries the previous owners and backups, respectively.\nThe delete operation works the same way.\n\nThe data(distributed map objects) in the fragmented partition is moved slowly to the primary owner by the **rebalancer**. Until the move is done,\nthe data remains available on the previous owners. The DMap methods use this list to query data on the cluster.\n\n*Please note that, 'multiple partition owners' is an undesirable situation and the **rebalancer** component is designed to fix that in a short time.*\n\n### Consistency and Replication Model\n\n**Olric is an AP product** in the context of [CAP theorem](https://en.wikipedia.org/wiki/CAP_theorem), which employs the combination of primary-copy \nand [optimistic replication](https://en.wikipedia.org/wiki/Optimistic_replication) techniques. With optimistic replication, when the partition owner \nreceives a write or delete operation for a key, applies it locally, and propagates it to the backup owners.\n\nThis technique enables Olric clusters to offer high throughput. However, due to temporary situations in the system, such as network\nfailure, backup owners can miss some updates and diverge from the primary owner. If a partition owner crashes while there is an\ninconsistency between itself and the backups, strong consistency of the data can be lost.\n\nTwo types of backup replication are available: **sync** and **async**. Both types are still implementations of the optimistic replication\nmodel.\n\n* **sync**: Blocks until write/delete operation is applied by backup owners.\n* **async**: Just fire & forget.\n\n#### Last-write-wins conflict resolution\n\nEvery time a piece of data is written to Olric, a timestamp is attached by the client. 
Then, when Olric has to deal with conflict data in the case \nof network partitioning, it simply chooses the data with the most recent timestamp. This called LWW conflict resolution policy.\n\n#### PACELC Theorem\n\nFrom Wikipedia:\n\n> In theoretical computer science, the [PACELC theorem](https://en.wikipedia.org/wiki/PACELC_theorem) is an extension to the [CAP theorem](https://en.wikipedia.org/wiki/CAP_theorem). It states that in case of network partitioning (P) in a \n> distributed computer system, one has to choose between availability (A) and consistency (C) (as per the CAP theorem), but else (E), even when the system is \n> running normally in the absence of partitions, one has to choose between latency (L) and consistency (C).\n\nIn the context of PACELC theorem, Olric is a **PA/EC** product. It means that Olric is considered to be **consistent** data store if the network is stable. \nBecause the key space is divided between partitions and every partition is controlled by its primary owner. All operations on DMaps are redirected to the \npartition owner. \n\nIn the case of network partitioning, Olric chooses **availability** over consistency. So that you can still access some parts of the cluster when the network is unreliable, \nbut the cluster may return inconsistent results.  \n\nOlric implements read-repair and quorum based voting system to deal with inconsistencies in the DMaps. 
\n\nReadings on PACELC theorem:\n* [Please stop calling databases CP or AP](https://martin.kleppmann.com/2015/05/11/please-stop-calling-databases-cp-or-ap.html)\n* [Problems with CAP, and Yahoo’s little known NoSQL system](https://dbmsmusings.blogspot.com/2010/04/problems-with-cap-and-yahoos-little.html)\n* [A Critique of the CAP Theorem](https://arxiv.org/abs/1509.05393)\n* [Hazelcast and the Mythical PA/EC System](https://dbmsmusings.blogspot.com/2017/10/hazelcast-and-mythical-paec-system.html)\n\n#### Read-Repair on DMaps\n\nRead repair is a feature that allows for inconsistent data to be fixed at query time. Olric tracks every write operation with a timestamp value and assumes \nthat the latest write operation is the valid one. When you want to access a key/value pair, the partition owner retrieves all available copies for that pair\nand compares the timestamp values. The latest one is the winner. If there is some outdated version of the requested pair, the primary owner propagates the latest\nversion of the pair. \n\nRead-repair is disabled by default for the sake of performance. If you have a use case that requires a more strict consistency control than a distributed caching \nscenario, you can enable read-repair via the configuration. \n\n#### Quorum-based replica control\n\nOlric implements Read/Write quorum to keep the data in a consistent state. When you start a write operation on the cluster and write quorum (W) is 2, \nthe partition owner tries to write the given key/value pair on its own data storage and on the replica nodes. If the number of successful write operations \nis below W, the primary owner returns `ErrWriteQuorum`. The read flow is the same: if you have R=2 and the owner only access one of the replicas, \nit returns `ErrReadQuorum`.\n\n#### Simple Split-Brain Protection\n\nOlric implements a technique called *majority quorum* to manage split-brain conditions. 
If a network partitioning occurs, and some members\nlose the connection to the rest of the cluster, they immediately stop functioning and return an error to incoming requests. This behaviour is controlled by the\n`MemberCountQuorum` parameter. Its default is `1`. \n\nWhen the network heals, the stopped nodes join the cluster again and fragmented partitions are merged by their primary owners in accordance with \n*LWW policy*. Olric also implements an *ownership report* mechanism to fix inconsistencies in partition distribution after a partitioning event. \n\n### Eviction\nOlric supports different policies to evict keys from distributed maps. \n\n#### Expire with TTL\nOlric implements TTL eviction policy. It shares the same algorithm with [Redis](https://redis.io/commands/expire#appendix-redis-expires):\n\n> Periodically Redis tests a few keys at random among keys with an expire set. All the keys that are already expired are deleted from the keyspace.\n>\n> Specifically this is what Redis does 10 times per second:\n>\n> * Test 20 random keys from the set of keys with an associated expire.\n> * Delete all the keys found expired.\n> * If more than 25% of keys were expired, start again from step 1.\n>\n> This is a trivial probabilistic algorithm, basically the assumption is that our sample is representative of the whole key space, and we continue to expire until the percentage of keys that are likely to be expired is under 25%\n\nWhen a client tries to access a key, Olric returns `ErrKeyNotFound` if the key is found to be timed out. A background task evicts keys with the algorithm described above.\n\n#### Expire with MaxIdleDuration\n\nMaximum time for each entry to stay idle in the DMap. It limits the lifetime of the entries relative to the time of the last read \nor write access performed on them. The entries whose idle period exceeds this limit are expired and evicted automatically. \nAn entry is idle if no Get, Put, PutEx, Expire, PutIf, or PutIfEx operation has been performed on it. 
Configuration of the MaxIdleDuration feature varies by \npreferred deployment method. \n\n#### Expire with LRU\n\nOlric implements an LRU eviction method on DMaps. The approximated LRU algorithm is borrowed from Redis. The Redis authors propose the following algorithm:\n\n> It is important to understand that the eviction process works like this:\n> \n> * A client runs a new command, resulting in more data added.\n> * Redis checks the memory usage, and if it is greater than the maxmemory limit , it evicts keys according to the policy.\n> * A new command is executed, and so forth.\n>\n> So we continuously cross the boundaries of the memory limit, by going over it, and then by evicting keys to return back under the limits.\n>\n> If a command results in a lot of memory being used (like a big set intersection stored into a new key) for some time the memory \n> limit can be surpassed by a noticeable amount. \n>\n> **Approximated LRU algorithm**\n>\n> Redis LRU algorithm is not an exact implementation. This means that Redis is not able to pick the best candidate for eviction, \n> that is, the access that was accessed the most in the past. Instead it will try to run an approximation of the LRU algorithm, \n> by sampling a small number of keys, and evicting the one that is the best (with the oldest access time) among the sampled keys.\n\nOlric tracks access time for every DMap instance. Then it picks and sorts some configurable amount of keys to select keys for eviction.\nEvery node runs this algorithm independently. The access log is moved along with the partition when a network partition occurs.\n\n#### Configuration of eviction mechanisms\n\nHere is a simple configuration block for `olric-server.yaml`: \n\n```\ncache:\n  numEvictionWorkers: 1\n  maxIdleDuration: \"\"\n  ttlDuration: \"100s\"\n  maxKeys: 100000\n  maxInuse: 1000000 # in bytes\n  lRUSamples: 10\n  evictionPolicy: \"LRU\" # NONE/LRU\n```\n\nYou can also set cache configuration per DMap. 
Here is a simple configuration for a DMap named `mydmap`:\n\n```\ndmaps:\n  mydmap:\n    maxIdleDuration: \"60s\"\n    ttlDuration: \"300s\"\n    maxKeys: 500000 # in-bytes\n    lRUSamples: 20\n    evictionPolicy: \"NONE\" # NONE/LRU\n```\n\nIf you prefer the embedded-member deployment scenario, please take a look at [config#CacheConfig](https://godoc.org/github.com/olric-data/olric/config#CacheConfig) and [config#DMapCacheConfig](https://godoc.org/github.com/olric-data/olric/config#DMapCacheConfig) for the configuration.\n\n\n### Lock Implementation\n\nThe DMap implementation is already thread-safe to meet your thread safety requirements. When you want to have more control over the\nconcurrency, you can use **LockWithTimeout** and **Lock** methods. Olric borrows the locking algorithm from Redis. Redis authors propose\nthe following algorithm:\n\n> The command <SET resource-name anystring NX EX max-lock-time> is a simple way to implement a locking system with Redis.\n>\n> A client can acquire the lock if the above command returns OK (or retry after some time if the command returns Nil), and remove the lock just using DEL.\n>\n> The lock will be auto-released after the expire time is reached.\n>\n> It is possible to make this system more robust modifying the unlock schema as follows:\n>\n> Instead of setting a fixed string, set a non-guessable large random string, called token.\n> Instead of releasing the lock with DEL, send a script that only removes the key if the value matches.\n> This avoids that a client will try to release the lock after the expire time deleting the key created by another client that acquired the lock later.\n\nThe equivalent of the `SETNX` command in Olric is `PutIf(key, value, IfNotFound)`. The Lock and LockWithTimeout commands properly implement\nthe algorithm which is proposed above. \n\nYou should know that this implementation is subject to the clustering algorithm. So there is no guarantee about reliability in the case of network partitioning. 
I recommend the lock implementation to be used for \nefficiency purposes in general, instead of correctness.\n\n**Important note about consistency:**\n\nYou should know that Olric is a PA/EC (see [Consistency and Replication Model](#consistency-and-replication-model)) product. So if your network is stable, all the operations on key/value \npairs are performed by a single cluster member. It means that you can be sure about the consistency when the cluster is stable. It's important to know that computer networks fail \noccasionally, processes crash and random GC pauses may happen. Many factors can lead to a network partitioning. If you cannot tolerate losing strong consistency under network partitioning, \nyou need to use a different tool for locking.\n\nSee [Hazelcast and the Mythical PA/EC System](https://dbmsmusings.blogspot.com/2017/10/hazelcast-and-mythical-paec-system.html) and [Jepsen Analysis on Hazelcast 3.8.3](https://hazelcast.com/blog/jepsen-analysis-hazelcast-3-8-3/) for more insight on this topic.\n             \n### Storage Engine\n\nOlric implements a GC-friendly storage engine to store large amounts of data on RAM. Basically, it applies an append-only log file approach with indexes. \nOlric inserts key/value pairs into pre-allocated byte slices (table in Olric terminology) and indexes that memory region by using Golang's built-in map. \nThe data type of this map is `map[uint64]uint64`. When a pre-allocated byte slice is full, Olric allocates a new one and continues inserting the new data into it. \nThis design greatly reduces the write latency.\n\nWhen you want to read a key/value pair from the Olric cluster, it scans the related DMap fragment by iterating over the indexes (implemented by the built-in map). \nThe number of allocated byte slices should be small. So Olric would find the key immediately but technically, the read performance depends on the number of keys in the fragment. 
\nThe effect of this design on the read performance is negligible.\n\nThe size of the pre-allocated byte slices is configurable.\n\n## Samples\n\nIn this section, you can find code snippets for various scenarios.\n\n### Embedded-member scenario\n#### Distributed map\n```go\npackage main\n\nimport (\n  \"context\"\n  \"fmt\"\n  \"log\"\n  \"time\"\n\n  \"github.com/olric-data/olric\"\n  \"github.com/olric-data/olric/config\"\n)\n\nfunc main() {\n  // Sample for Olric v0.7.x\n\n  // Deployment scenario: embedded-member\n  // This creates a single-node Olric cluster. It's good enough for experimenting.\n\n  // config.New returns a new config.Config with sane defaults. Available values for env:\n  // local, lan, wan\n  c := config.New(\"local\")\n\n  // Callback function. It's called when this node is ready to accept connections.\n  ctx, cancel := context.WithCancel(context.Background())\n  c.Started = func() {\n    defer cancel()\n    log.Println(\"[INFO] Olric is ready to accept connections\")\n  }\n\n  // Create a new Olric instance.\n  db, err := olric.New(c)\n  if err != nil {\n    log.Fatalf(\"Failed to create Olric instance: %v\", err)\n  }\n\n  // Start the instance. It will form a single-node cluster.\n  go func() {\n    // Call Start at background. It's a blocker call.\n    err = db.Start()\n    if err != nil {\n      log.Fatalf(\"olric.Start returned an error: %v\", err)\n    }\n  }()\n\n  <-ctx.Done()\n\n  // In embedded-member scenario, you can use the EmbeddedClient. 
It implements\n  // the Client interface.\n  e := db.NewEmbeddedClient()\n\n  dm, err := e.NewDMap(\"bucket-of-arbitrary-items\")\n  if err != nil {\n    log.Fatalf(\"olric.NewDMap returned an error: %v\", err)\n  }\n\n  ctx, cancel = context.WithCancel(context.Background())\n\n  // Magic starts here!\n  fmt.Println(\"##\")\n  fmt.Println(\"Simple Put/Get on a DMap instance:\")\n  err = dm.Put(ctx, \"my-key\", \"Olric Rocks!\")\n  if err != nil {\n    log.Fatalf(\"Failed to call Put: %v\", err)\n  }\n\n  gr, err := dm.Get(ctx, \"my-key\")\n  if err != nil {\n    log.Fatalf(\"Failed to call Get: %v\", err)\n  }\n\n  // Olric uses the Redis serialization format.\n  value, err := gr.String()\n  if err != nil {\n    log.Fatalf(\"Failed to read Get response: %v\", err)\n  }\n\n  fmt.Println(\"Response for my-key:\", value)\n  fmt.Println(\"##\")\n\n  // Don't forget the call Shutdown when you want to leave the cluster.\n  ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)\n  defer cancel()\n\n  err = db.Shutdown(ctx)\n  if err != nil {\n    log.Printf(\"Failed to shutdown Olric: %v\", err)\n  }\n}\n```\n\n#### Publish-Subscribe\n\n```go\npackage main\n\nimport (\n  \"context\"\n  \"fmt\"\n  \"log\"\n  \"time\"\n\n  \"github.com/olric-data/olric\"\n  \"github.com/olric-data/olric/config\"\n)\n\nfunc main() {\n  // Sample for Olric v0.7.x\n\n  // Deployment scenario: embedded-member\n  // This creates a single-node Olric cluster. It's good enough for experimenting.\n\n  // config.New returns a new config.Config with sane defaults. Available values for env:\n  // local, lan, wan\n  c := config.New(\"local\")\n\n  // Callback function. 
It's called when this node is ready to accept connections.\n  ctx, cancel := context.WithCancel(context.Background())\n  c.Started = func() {\n    defer cancel()\n    log.Println(\"[INFO] Olric is ready to accept connections\")\n  }\n\n  // Create a new Olric instance.\n  db, err := olric.New(c)\n  if err != nil {\n    log.Fatalf(\"Failed to create Olric instance: %v\", err)\n  }\n\n  // Start the instance. It will form a single-node cluster.\n  go func() {\n    // Call Start at background. It's a blocker call.\n    err = db.Start()\n    if err != nil {\n      log.Fatalf(\"olric.Start returned an error: %v\", err)\n    }\n  }()\n\n  <-ctx.Done()\n\n  // In embedded-member scenario, you can use the EmbeddedClient. It implements\n  // the Client interface.\n  e := db.NewEmbeddedClient()\n\n  ps, err := e.NewPubSub()\n  if err != nil {\n    log.Fatalf(\"olric.NewPubSub returned an error: %v\", err)\n  }\n\n  ctx, cancel = context.WithCancel(context.Background())\n\n  // Olric implements a drop-in replacement of Redis Publish-Subscribe messaging\n  // system. 
PubSub client is just a thin layer around go-redis/redis.\n  rps := ps.Subscribe(ctx, \"my-channel\")\n\n  // Get a message to read messages from my-channel\n  msg := rps.Channel()\n\n  go func() {\n    // Publish a message here.\n    _, err := ps.Publish(ctx, \"my-channel\", \"Olric Rocks!\")\n    if err != nil {\n      log.Fatalf(\"PubSub.Publish returned an error: %v\", err)\n    }\n  }()\n\n  // Consume messages\n  rm := <-msg\n\n  fmt.Printf(\"Received message: \\\"%s\\\" from \\\"%s\\\"\", rm.Channel, rm.Payload)\n\n  // Don't forget the call Shutdown when you want to leave the cluster.\n  ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)\n  defer cancel()\n\n  err = e.Close(ctx)\n  if err != nil {\n    log.Printf(\"Failed to close EmbeddedClient: %v\", err)\n  }\n}\n```\n\n### Client-Server scenario\n#### Distributed map\n\n```go\npackage main\n\nimport (\n  \"context\"\n  \"fmt\"\n  \"log\"\n  \"time\"\n\n  \"github.com/olric-data/olric\"\n)\n\nfunc main() {\n  // Sample for Olric v0.7.x\n\n  // Deployment scenario: client-server\n\n  // NewClusterClient takes a list of the nodes. This list may only contain a\n  // load balancer address. Please note that Olric nodes will calculate the partition owner\n  // and proxy the incoming requests.\n  c, err := olric.NewClusterClient([]string{\"localhost:3320\"})\n  if err != nil {\n    log.Fatalf(\"olric.NewClusterClient returned an error: %v\", err)\n  }\n\n  // In client-server scenario, you can use the ClusterClient. 
It implements\n  // the Client interface.\n  dm, err := c.NewDMap(\"bucket-of-arbitrary-items\")\n  if err != nil {\n    log.Fatalf(\"olric.NewDMap returned an error: %v\", err)\n  }\n\n  ctx, cancel := context.WithCancel(context.Background())\n\n  // Magic starts here!\n  fmt.Println(\"##\")\n  fmt.Println(\"Simple Put/Get on a DMap instance:\")\n  err = dm.Put(ctx, \"my-key\", \"Olric Rocks!\")\n  if err != nil {\n    log.Fatalf(\"Failed to call Put: %v\", err)\n  }\n\n  gr, err := dm.Get(ctx, \"my-key\")\n  if err != nil {\n    log.Fatalf(\"Failed to call Get: %v\", err)\n  }\n\n  // Olric uses the Redis serialization format.\n  value, err := gr.String()\n  if err != nil {\n    log.Fatalf(\"Failed to read Get response: %v\", err)\n  }\n\n  fmt.Println(\"Response for my-key:\", value)\n  fmt.Println(\"##\")\n\n  // Don't forget the call Shutdown when you want to leave the cluster.\n  ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)\n  defer cancel()\n\n  err = c.Close(ctx)\n  if err != nil {\n    log.Printf(\"Failed to close ClusterClient: %v\", err)\n  }\n}\n```\n\n### SCAN on DMaps\n\n```go\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric\"\n\t\"github.com/olric-data/olric/config\"\n)\n\nfunc main() {\n\t// Sample for Olric v0.7.x\n\n\t// Deployment scenario: embedded-member\n\t// This creates a single-node Olric cluster. It's good enough for experimenting.\n\n\t// config.New returns a new config.Config with sane defaults. Available values for env:\n\t// local, lan, wan\n\tc := config.New(\"local\")\n\n\t// Callback function. 
It's called when this node is ready to accept connections.\n\tctx, cancel := context.WithCancel(context.Background())\n\tc.Started = func() {\n\t\tdefer cancel()\n\t\tlog.Println(\"[INFO] Olric is ready to accept connections\")\n\t}\n\n\t// Create a new Olric instance.\n\tdb, err := olric.New(c)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to create Olric instance: %v\", err)\n\t}\n\n\t// Start the instance. It will form a single-node cluster.\n\tgo func() {\n\t\t// Call Start at background. It's a blocker call.\n\t\terr = db.Start()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"olric.Start returned an error: %v\", err)\n\t\t}\n\t}()\n\n\t<-ctx.Done()\n\n\t// In embedded-member scenario, you can use the EmbeddedClient. It implements\n\t// the Client interface.\n\te := db.NewEmbeddedClient()\n\n\tdm, err := e.NewDMap(\"bucket-of-arbitrary-items\")\n\tif err != nil {\n\t\tlog.Fatalf(\"olric.NewDMap returned an error: %v\", err)\n\t}\n\n\tctx, cancel = context.WithCancel(context.Background())\n\n\t// Magic starts here!\n\tfmt.Println(\"##\")\n\tfmt.Println(\"Insert 10 keys\")\n\tvar key string\n\tfor i := 0; i < 10; i++ {\n\t\tif i%2 == 0 {\n\t\t\tkey = fmt.Sprintf(\"even:%d\", i)\n\t\t} else {\n\t\t\tkey = fmt.Sprintf(\"odd:%d\", i)\n\t\t}\n\t\terr = dm.Put(ctx, key, nil)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed to call Put: %v\", err)\n\t\t}\n\t}\n\n\ti, err := dm.Scan(ctx)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to call Scan: %v\", err)\n\t}\n\n\tfmt.Println(\"Iterate over all the keys\")\n\tfor i.Next() {\n\t\tfmt.Println(\">> Key\", i.Key())\n\t}\n\n\ti.Close()\n\n\ti, err = dm.Scan(ctx, olric.Match(\"^even:\"))\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to call Scan: %v\", err)\n\t}\n\n\tfmt.Println(\"\\n\\nScan with regex: ^even:\")\n\tfor i.Next() {\n\t\tfmt.Println(\">> Key\", i.Key())\n\t}\n\n\ti.Close()\n\n\t// Don't forget the call Shutdown when you want to leave the cluster.\n\tctx, cancel = context.WithTimeout(context.Background(), 
10*time.Second)\n\tdefer cancel()\n\n\terr = db.Shutdown(ctx)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to shutdown Olric: %v\", err)\n\t}\n}\n```\n\n#### Publish-Subscribe\n```go\npackage main\n\nimport (\n  \"context\"\n  \"fmt\"\n  \"log\"\n  \"time\"\n\n  \"github.com/olric-data/olric\"\n)\n\nfunc main() {\n  // Sample for Olric v0.7.x\n\n  // Deployment scenario: client-server\n\n  // NewClusterClient takes a list of the nodes. This list may only contain a\n  // load balancer address. Please note that Olric nodes will calculate the partition owner\n  // and proxy the incoming requests.\n  c, err := olric.NewClusterClient([]string{\"localhost:3320\"})\n  if err != nil {\n    log.Fatalf(\"olric.NewClusterClient returned an error: %v\", err)\n  }\n\n  // In client-server scenario, you can use the ClusterClient. It implements\n  // the Client interface.\n  ps, err := c.NewPubSub()\n  if err != nil {\n    log.Fatalf(\"olric.NewPubSub returned an error: %v\", err)\n  }\n\n  ctx, cancel := context.WithCancel(context.Background())\n\n  // Olric implements a drop-in replacement of Redis Publish-Subscribe messaging\n  // system. 
PubSub client is just a thin layer around go-redis/redis.\n  rps := ps.Subscribe(ctx, \"my-channel\")\n\n  // Get a message to read messages from my-channel\n  msg := rps.Channel()\n\n  go func() {\n    // Publish a message here.\n    _, err := ps.Publish(ctx, \"my-channel\", \"Olric Rocks!\")\n    if err != nil {\n      log.Fatalf(\"PubSub.Publish returned an error: %v\", err)\n    }\n  }()\n\n  // Consume messages\n  rm := <-msg\n\n  fmt.Printf(\"Received message: \\\"%s\\\" from \\\"%s\\\"\", rm.Channel, rm.Payload)\n\n  // Don't forget the call Shutdown when you want to leave the cluster.\n  ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)\n  defer cancel()\n\n  err = c.Close(ctx)\n  if err != nil {\n    log.Printf(\"Failed to close ClusterClient: %v\", err)\n  }\n}\n\n```\n\n## Contributions\n\nPlease don't hesitate to fork the project and send a pull request or just e-mail me to ask questions and share ideas.\n\n## License\n\nThe Apache License, Version 2.0 - see LICENSE for more details.\n\n## About the name\n\nThe inner voice of Turgut Özben who is the main character of [Oğuz Atay's masterpiece -The Disconnected-](https://www.themodernnovel.org/asia/other-asia/turkey/oguz-atay/the-disconnected/).\n"
  },
  {
    "path": "auth.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage olric\n\nimport (\n\t\"errors\"\n\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/olric-data/olric/internal/server\"\n\t\"github.com/tidwall/redcon\"\n)\n\n// authCommandHandler handles authentication requests sent by clients and verifies the provided password for access.\nfunc (db *Olric) authCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tauthCmd, err := protocol.ParseAuthCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tif !db.config.Authentication.Enabled() {\n\t\tprotocol.WriteError(conn, errors.New(\"AUTH <password> called without any password configured for the default user. Are you sure your configuration is correct?\"))\n\t\treturn\n\t}\n\n\tif authCmd.Password == db.config.Authentication.Password {\n\t\tctx := conn.Context().(*server.ConnContext)\n\t\tctx.SetAuthenticated(true)\n\t\tconn.WriteString(protocol.StatusOK)\n\t\treturn\n\t}\n\tprotocol.WriteError(conn, ErrWrongPass)\n}\n"
  },
  {
    "path": "auth_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage olric\n\nimport (\n\t\"context\"\n\t\"testing\"\n\n\t\"github.com/olric-data/olric/config\"\n\t\"github.com/olric-data/olric/internal/testutil\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestAuthCommandHandler_WithPassword(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\ttestConfig := testutil.NewConfig()\n\ttestConfig.Authentication = &config.Authentication{\n\t\tPassword: \"test-password\",\n\t}\n\tdb := cluster.addMemberWithConfig(t, testConfig)\n\n\texpectedMessage := \"error while discovering the cluster members: wrong password\"\n\tctx := context.Background()\n\tt.Run(\"With correct credentials\", func(t *testing.T) {\n\t\tc, err := NewClusterClient([]string{db.name}, WithPassword(\"test-password\"))\n\t\trequire.NoError(t, err)\n\t\tdefer func() {\n\t\t\trequire.NoError(t, c.Close(ctx))\n\t\t}()\n\n\t\tresponse, err := c.Ping(ctx, db.rt.This().String(), \"\")\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, DefaultPingResponse, response)\n\t})\n\n\tt.Run(\"With wrong credentials\", func(t *testing.T) {\n\t\t_, err := NewClusterClient([]string{db.name}, WithPassword(\"wrong\"))\n\t\trequire.ErrorContains(t, err, expectedMessage)\n\t})\n\n\tt.Run(\"Without credentials\", func(t *testing.T) {\n\t\t_, err := NewClusterClient([]string{db.name}, WithPassword(\"wrong\"))\n\t\trequire.ErrorContains(t, err, 
expectedMessage)\n\t})\n}\n\nfunc TestAuthCommandHandler_Auth_Disabled(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\t_, err := NewClusterClient([]string{db.name}, WithPassword(\"test-password\"))\n\trequire.ErrorContains(t, err, \"error while discovering the cluster members: AUTH <password> called without any password configured for the default user. Are you sure your configuration is correct?\")\n}\n"
  },
  {
    "path": "client.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage olric\n\nimport (\n\t\"context\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/dmap\"\n\t\"github.com/olric-data/olric/pkg/storage\"\n\t\"github.com/olric-data/olric/stats\"\n)\n\nconst DefaultScanCount = 10\n\n// Member denotes a member of the Olric cluster.\ntype Member struct {\n\t// Member name in the cluster. It's also host:port of the node.\n\tName string\n\n\t// ID of the Member in the cluster. Hash of Name and Birthdate of the member\n\tID uint64\n\n\t// Birthdate of the member in nanoseconds.\n\tBirthdate int64\n\n\t// Role of the member in the cluster. There is only one coordinator member\n\t// in a healthy cluster.\n\tCoordinator bool\n}\n\n// Iterator defines an interface to implement iterators on the distributed maps.\ntype Iterator interface {\n\t// Next returns true if there is more key in the iterator implementation.\n\t// Otherwise, it returns false.\n\tNext() bool\n\n\t// Key returns a key name from the distributed map.\n\tKey() string\n\n\t// Close stops the iteration and releases allocated resources.\n\tClose()\n}\n\n// LockContext interface defines methods to manage locks on distributed maps.\ntype LockContext interface {\n\t// Unlock releases an acquired lock for the given key. 
It returns ErrNoSuchLock\n\t// if there is no lock for the given key.\n\tUnlock(ctx context.Context) error\n\n\t// Lease sets or updates the timeout of the acquired lock for the given key.\n\t// It returns ErrNoSuchLock if there is no lock for the given key.\n\tLease(ctx context.Context, duration time.Duration) error\n}\n\n// PutOption is a function for define options to control behavior of the Put command.\ntype PutOption func(*dmap.PutConfig)\n\n// EX sets the specified expire time, in seconds.\nfunc EX(ex time.Duration) PutOption {\n\treturn func(cfg *dmap.PutConfig) {\n\t\tcfg.HasEX = true\n\t\tcfg.EX = ex\n\t}\n}\n\n// PX sets the specified expire time, in milliseconds.\nfunc PX(px time.Duration) PutOption {\n\treturn func(cfg *dmap.PutConfig) {\n\t\tcfg.HasPX = true\n\t\tcfg.PX = px\n\t}\n}\n\n// EXAT sets the specified Unix time at which the key will expire, in seconds.\nfunc EXAT(exat time.Duration) PutOption {\n\treturn func(cfg *dmap.PutConfig) {\n\t\tcfg.HasEXAT = true\n\t\tcfg.EXAT = exat\n\t}\n}\n\n// PXAT sets the specified Unix time at which the key will expire, in milliseconds.\nfunc PXAT(pxat time.Duration) PutOption {\n\treturn func(cfg *dmap.PutConfig) {\n\t\tcfg.HasPXAT = true\n\t\tcfg.PXAT = pxat\n\t}\n}\n\n// NX only sets the key if it does not already exist.\nfunc NX() PutOption {\n\treturn func(cfg *dmap.PutConfig) {\n\t\tcfg.HasNX = true\n\t}\n}\n\n// XX only sets the key if it already exists.\nfunc XX() PutOption {\n\treturn func(cfg *dmap.PutConfig) {\n\t\tcfg.HasXX = true\n\t}\n}\n\ntype dmapConfig struct {\n\tstorageEntryImplementation func() storage.Entry\n}\n\n// DMapOption is a function for defining options to control behavior of distributed map instances.\ntype DMapOption func(*dmapConfig)\n\n// StorageEntryImplementation sets and encoder/decoder implementation for your choice of storage engine.\nfunc StorageEntryImplementation(e func() storage.Entry) DMapOption {\n\treturn func(cfg *dmapConfig) {\n\t\tcfg.storageEntryImplementation 
= e\n\t}\n}\n\n// ScanOption is a function for defining options to control behavior of the SCAN command.\ntype ScanOption func(*dmap.ScanConfig)\n\n// Count is the user specified the amount of work that should be done at every call in order to\n// retrieve elements from the distributed map. This is just a hint for the implementation,\n// however generally speaking this is what you could expect most of the time from the implementation.\n// The default value is 10.\nfunc Count(c int) ScanOption {\n\treturn func(cfg *dmap.ScanConfig) {\n\t\tcfg.HasCount = true\n\t\tcfg.Count = c\n\t}\n}\n\n// Match is used for using regular expressions on keys. See https://pkg.go.dev/regexp\nfunc Match(s string) ScanOption {\n\treturn func(cfg *dmap.ScanConfig) {\n\t\tcfg.HasMatch = true\n\t\tcfg.Match = s\n\t}\n}\n\n// DMap defines methods to access and manipulate distributed maps.\ntype DMap interface {\n\t// Name exposes name of the DMap.\n\tName() string\n\n\t// Put sets the value for the given key. It overwrites any previous value for\n\t// that key, and it's thread-safe. The key has to be a string. value type is arbitrary.\n\t// It is safe to modify the contents of the arguments after Put returns but not before.\n\tPut(ctx context.Context, key string, value interface{}, options ...PutOption) error\n\n\t// Get gets the value for the given key. It returns ErrKeyNotFound if the DB\n\t// does not contain the key. It's thread-safe. It is safe to modify the contents\n\t// of the returned value. See GetResponse for the details.\n\tGet(ctx context.Context, key string) (*GetResponse, error)\n\n\t// Delete deletes values for the given keys. Delete will not return error\n\t// if key doesn't exist. It's thread-safe. It is safe to modify the contents\n\t// of the argument after Delete returns.\n\tDelete(ctx context.Context, keys ...string) (int, error)\n\n\t// Incr atomically increments the key by delta. 
The return value is the new value\n\t// after being incremented or an error.\n\tIncr(ctx context.Context, key string, delta int) (int, error)\n\n\t// Decr atomically decrements the key by delta. The return value is the new value\n\t// after being decremented or an error.\n\tDecr(ctx context.Context, key string, delta int) (int, error)\n\n\t// GetPut atomically sets the key to value and returns the old value stored at key. It returns nil if there is no\n\t// previous value.\n\tGetPut(ctx context.Context, key string, value interface{}) (*GetResponse, error)\n\n\t// IncrByFloat atomically increments the key by delta. The return value is the new value\n\t// after being incremented or an error.\n\tIncrByFloat(ctx context.Context, key string, delta float64) (float64, error)\n\n\t// Expire updates the expiry for the given key. It returns ErrKeyNotFound if\n\t// the DB does not contain the key. It's thread-safe.\n\tExpire(ctx context.Context, key string, timeout time.Duration) error\n\n\t// Lock sets a lock for the given key. Acquired lock is only for the key in\n\t// this dmap.\n\t//\n\t// It returns immediately if it acquires the lock for the given key. Otherwise,\n\t// it waits until deadline.\n\t//\n\t// You should know that the locks are approximate, and only to be used for\n\t// non-critical purposes.\n\tLock(ctx context.Context, key string, deadline time.Duration) (LockContext, error)\n\n\t// LockWithTimeout sets a lock for the given key. If the lock is still unreleased\n\t// the end of given period of time,\n\t// it automatically releases the lock. Acquired lock is only for the key in\n\t// this dmap.\n\t//\n\t// It returns immediately if it acquires the lock for the given key. 
Otherwise,\n\t// it waits until deadline.\n\t//\n\t// You should know that the locks are approximate, and only to be used for\n\t// non-critical purposes.\n\tLockWithTimeout(ctx context.Context, key string, timeout, deadline time.Duration) (LockContext, error)\n\n\t// Scan returns an iterator to loop over the keys.\n\t//\n\t// Available scan options:\n\t//\n\t// * Count\n\t// * Match\n\tScan(ctx context.Context, options ...ScanOption) (Iterator, error)\n\n\t// Destroy flushes the given DMap on the cluster. You should know that there\n\t// is no global lock on DMaps. So if you call Put/PutEx and Destroy methods\n\t// concurrently on the cluster, Put call may set new values to the DMap.\n\tDestroy(ctx context.Context) error\n\n\t// Pipeline is a mechanism to realise Redis Pipeline technique.\n\t//\n\t// Pipelining is a technique to extremely speed up processing by packing\n\t// operations to batches, send them at once to Redis and read a replies in a\n\t// singe step.\n\t// See https://redis.io/topics/pipelining\n\t//\n\t// Pay attention, that Pipeline is not a transaction, so you can get unexpected\n\t// results in case of big pipelines and small read/write timeouts.\n\t// Redis client has retransmission logic in case of timeouts, pipeline\n\t// can be retransmitted and commands can be executed more than once.\n\tPipeline(opts ...PipelineOption) (*DMapPipeline, error)\n\n\t// Close stops background routines and frees allocated resources.\n\tClose(ctx context.Context) error\n}\n\n// PipelineOption is a function for defining options to control behavior of the Pipeline command.\ntype PipelineOption func(pipeline *DMapPipeline)\n\n// PipelineConcurrency is a PipelineOption controlling the number of concurrent goroutines.\nfunc PipelineConcurrency(concurrency int) PipelineOption {\n\treturn func(dp *DMapPipeline) {\n\t\tdp.concurrency = concurrency\n\t}\n}\n\ntype statsConfig struct {\n\tCollectRuntime bool\n}\n\n// StatsOption is a function for defining options to 
control behavior of the STATS command.\ntype StatsOption func(*statsConfig)\n\n// CollectRuntime is a StatsOption for collecting Go runtime statistics from a cluster member.\nfunc CollectRuntime() StatsOption {\n\treturn func(cfg *statsConfig) {\n\t\tcfg.CollectRuntime = true\n\t}\n}\n\ntype pubsubConfig struct {\n\tAddress string\n}\n\n// ToAddress is a PubSubOption for using a specific cluster member to publish messages to a channel.\nfunc ToAddress(addr string) PubSubOption {\n\treturn func(cfg *pubsubConfig) {\n\t\tcfg.Address = addr\n\t}\n}\n\n// PubSubOption is a function for defining options to control behavior of the Publish-Subscribe service.\ntype PubSubOption func(option *pubsubConfig)\n\n// Client is an interface that denotes an Olric client.\ntype Client interface {\n\t// NewDMap returns a new DMap client with the given options.\n\tNewDMap(name string, options ...DMapOption) (DMap, error)\n\n\t// NewPubSub returns a new PubSub client with the given options.\n\tNewPubSub(options ...PubSubOption) (*PubSub, error)\n\n\t// Stats returns stats.Stats with the given options.\n\tStats(ctx context.Context, address string, options ...StatsOption) (stats.Stats, error)\n\n\t// Ping sends a ping message to an Olric node. Returns PONG if message is empty,\n\t// otherwise return a copy of the message as a bulk. This command is often used to test\n\t// if a connection is still alive, or to measure latency.\n\tPing(ctx context.Context, address, message string) (string, error)\n\n\t// RoutingTable returns the latest version of the routing table.\n\tRoutingTable(ctx context.Context) (RoutingTable, error)\n\n\t// Members returns a thread-safe list of cluster members.\n\tMembers(ctx context.Context) ([]Member, error)\n\n\t// RefreshMetadata fetches a list of available members and the latest routing\n\t// table version. 
It also closes stale clients, if there are any.\n\tRefreshMetadata(ctx context.Context) error\n\n\t// Close stops background routines and frees allocated resources.\n\tClose(ctx context.Context) error\n}\n"
  },
  {
    "path": "cluster.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage olric\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/tidwall/redcon\"\n)\n\ntype Route struct {\n\tPrimaryOwners []string\n\tReplicaOwners []string\n}\n\ntype RoutingTable map[uint64]Route\n\nfunc mapToRoutingTable(slice []interface{}) (RoutingTable, error) {\n\trt := make(RoutingTable)\n\tfor _, raw := range slice {\n\t\titem := raw.([]interface{})\n\t\trawPartID, rawPrimaryOwners, rawReplicaOwners := item[0], item[1], item[2]\n\t\tvar partID uint64\n\t\tswitch rawPartID.(type) {\n\t\tcase int64:\n\t\t\tpartID = uint64(rawPartID.(int64))\n\t\tcase string:\n\t\t\traw, err := strconv.ParseUint(rawPartID.(string), 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid partition id: %v: %w\", rawPartID, err)\n\t\t\t}\n\t\t\tpartID = raw\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"invalid partition id: %v\", rawPartID)\n\t\t}\n\n\t\tr := Route{}\n\t\tprimaryOwners, ok := rawPrimaryOwners.([]interface{})\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"invalid primary owners: %v\", rawPrimaryOwners)\n\t\t}\n\t\tfor _, rawOwner := range primaryOwners {\n\t\t\towner, ok := rawOwner.(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid owner: %v\", owner)\n\t\t\t}\n\t\t\tr.PrimaryOwners = append(r.PrimaryOwners, 
owner)\n\t\t}\n\n\t\treplicaOwners, ok := rawReplicaOwners.([]interface{})\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"invalid replica owners: %v\", rawPrimaryOwners)\n\t\t}\n\t\tfor _, rawOwner := range replicaOwners {\n\t\t\towner, ok := rawOwner.(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid owner: %v\", owner)\n\t\t\t}\n\t\t\tr.ReplicaOwners = append(r.ReplicaOwners, owner)\n\t\t}\n\t\trt[partID] = r\n\t}\n\treturn rt, nil\n}\n\nfunc (db *Olric) clusterRoutingTableCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\t_, err := protocol.ParseClusterRoutingTable(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\tcoordinator := db.rt.Discovery().GetCoordinator()\n\tif coordinator.CompareByID(db.rt.This()) {\n\t\tconn.WriteArray(int(db.config.PartitionCount))\n\t\trt := db.fillRoutingTable()\n\t\tfor partID := uint64(0); partID < db.config.PartitionCount; partID++ {\n\t\t\tconn.WriteArray(3)\n\t\t\tconn.WriteUint64(partID)\n\n\t\t\tr := rt[partID]\n\t\t\tprimaryOwners := r.PrimaryOwners\n\t\t\tconn.WriteArray(len(primaryOwners))\n\t\t\tfor _, owner := range primaryOwners {\n\t\t\t\tconn.WriteBulkString(owner)\n\t\t\t}\n\n\t\t\treplicaOwners := r.ReplicaOwners\n\t\t\tconn.WriteArray(len(replicaOwners))\n\t\t\tfor _, owner := range replicaOwners {\n\t\t\t\tconn.WriteBulkString(owner)\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\n\t// Redirect to the cluster coordinator\n\trtCmd := protocol.NewClusterRoutingTable().Command(db.ctx)\n\trc := db.client.Get(coordinator.String())\n\terr = rc.Process(db.ctx, rtCmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\tslice, err := rtCmd.Slice()\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\tconn.WriteAny(slice)\n}\n\nfunc (db *Olric) fillRoutingTable() RoutingTable {\n\trt := make(RoutingTable)\n\tfor partID := uint64(0); partID < db.config.PartitionCount; partID++ {\n\t\tr := Route{}\n\t\tprimaryOwners := 
db.primary.PartitionOwnersByID(partID)\n\t\tfor _, owner := range primaryOwners {\n\t\t\tr.PrimaryOwners = append(r.PrimaryOwners, owner.String())\n\t\t}\n\t\treplicaOwners := db.backup.PartitionOwnersByID(partID)\n\t\tfor _, owner := range replicaOwners {\n\t\t\tr.ReplicaOwners = append(r.ReplicaOwners, owner.String())\n\t\t}\n\t\trt[partID] = r\n\t}\n\treturn rt\n}\n\nfunc (db *Olric) routingTable(ctx context.Context) (RoutingTable, error) {\n\tcoordinator := db.rt.Discovery().GetCoordinator()\n\tif coordinator.CompareByID(db.rt.This()) {\n\t\treturn db.fillRoutingTable(), nil\n\t}\n\n\trtCmd := protocol.NewClusterRoutingTable().Command(ctx)\n\trc := db.client.Get(coordinator.String())\n\terr := rc.Process(ctx, rtCmd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tslice, err := rtCmd.Slice()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn mapToRoutingTable(slice)\n}\n\nfunc (db *Olric) clusterMembersCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\t_, err := protocol.ParseClusterMembers(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tcoordinator := db.rt.Discovery().GetCoordinator()\n\tmembers := db.rt.Discovery().GetMembers()\n\tconn.WriteArray(len(members))\n\tfor _, member := range members {\n\t\tconn.WriteArray(3)\n\t\tconn.WriteBulkString(member.Name)\n\t\t// go-redis/redis package cannot handle uint64. At the time of this writing,\n\t\t// there is no solution for this, and I don't want to use a soft fork to repair it.\n\t\t//conn.WriteUint64(member.ID)\n\t\tconn.WriteInt64(member.Birthdate)\n\t\tif coordinator.CompareByID(member) {\n\t\t\tconn.WriteBulkString(\"true\")\n\t\t} else {\n\t\t\tconn.WriteBulkString(\"false\")\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "cluster_client.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage olric\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/config\"\n\t\"github.com/olric-data/olric/hasher\"\n\t\"github.com/olric-data/olric/internal/bufpool\"\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/discovery\"\n\t\"github.com/olric-data/olric/internal/dmap\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/olric-data/olric/internal/ramblock/entry\"\n\t\"github.com/olric-data/olric/internal/resp\"\n\t\"github.com/olric-data/olric/internal/server\"\n\t\"github.com/olric-data/olric/pkg/storage\"\n\t\"github.com/olric-data/olric/stats\"\n\t\"github.com/redis/go-redis/v9\"\n)\n\nvar pool = bufpool.New()\n\n// DefaultRoutingTableFetchInterval is the default value of RoutingTableFetchInterval. 
ClusterClient implementation\n// fetches the routing table from the cluster to route requests to the right partition.\nconst DefaultRoutingTableFetchInterval = time.Minute\n\ntype ClusterLockContext struct {\n\tkey   string\n\ttoken string\n\tdm    *ClusterDMap\n}\n\n// ClusterDMap implements a client for DMaps.\ntype ClusterDMap struct {\n\tname          string\n\tnewEntry      func() storage.Entry\n\tconfig        *dmapConfig\n\tclient        *server.Client\n\tclusterClient *ClusterClient\n}\n\n// Name exposes name of the DMap.\nfunc (dm *ClusterDMap) Name() string {\n\treturn dm.name\n}\n\n// processProtocolError processes protocol-related errors and translates them into defined application-level errors.\nfunc processProtocolError(err error) error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\tif errors.Is(err, redis.Nil) {\n\t\treturn ErrKeyNotFound\n\t}\n\tif errors.Is(err, syscall.ECONNREFUSED) {\n\t\topErr := err.(*net.OpError)\n\t\treturn fmt.Errorf(\"%s %s %s: %w\", opErr.Op, opErr.Net, opErr.Addr, ErrConnRefused)\n\t}\n\treturn convertDMapError(protocol.ConvertError(err))\n}\n\n// writePutCommand constructs and returns a new protocol.Put command based on the provided key, value, and configuration options.\nfunc (dm *ClusterDMap) writePutCommand(c *dmap.PutConfig, key string, value []byte) *protocol.Put {\n\tcmd := protocol.NewPut(dm.name, key, value)\n\tswitch {\n\tcase c.HasEX:\n\t\tcmd.SetEX(c.EX.Seconds())\n\tcase c.HasPX:\n\t\tcmd.SetPX(c.PX.Milliseconds())\n\tcase c.HasEXAT:\n\t\tcmd.SetEXAT(c.EXAT.Seconds())\n\tcase c.HasPXAT:\n\t\tcmd.SetPXAT(c.PXAT.Milliseconds())\n\t}\n\n\tswitch {\n\tcase c.HasNX:\n\t\tcmd.SetNX()\n\tcase c.HasXX:\n\t\tcmd.SetXX()\n\t}\n\n\treturn cmd\n}\n\nfunc (cl *ClusterClient) clientByPartID(partID uint64) (*redis.Client, error) {\n\traw := cl.routingTable.Load()\n\tif raw == nil {\n\t\treturn nil, fmt.Errorf(\"routing table is empty\")\n\t}\n\n\troutingTable, ok := raw.(RoutingTable)\n\tif !ok {\n\t\treturn nil, 
fmt.Errorf(\"routing table is corrupt\")\n\t}\n\n\troute := routingTable[partID]\n\tif len(route.PrimaryOwners) == 0 {\n\t\treturn nil, fmt.Errorf(\"primary owners list for %d is empty\", partID)\n\t}\n\n\tprimaryOwner := route.PrimaryOwners[len(route.PrimaryOwners)-1]\n\treturn cl.client.Get(primaryOwner), nil\n}\n\nfunc (cl *ClusterClient) smartPick(dmap, key string) (*redis.Client, error) {\n\thkey := partitions.HKey(dmap, key)\n\tpartID := hkey % cl.partitionCount\n\treturn cl.clientByPartID(partID)\n}\n\n// Put sets the value for the given key. It overwrites any previous value for\n// that key, and it's thread-safe. The key has to be a string. value type is arbitrary.\n// It is safe to modify the contents of the arguments after Put returns but not before.\nfunc (dm *ClusterDMap) Put(ctx context.Context, key string, value interface{}, options ...PutOption) error {\n\trc, err := dm.clusterClient.smartPick(dm.name, key)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvalueBuf := pool.Get()\n\tdefer pool.Put(valueBuf)\n\n\tenc := resp.New(valueBuf)\n\terr = enc.Encode(value)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar pc dmap.PutConfig\n\tfor _, opt := range options {\n\t\topt(&pc)\n\t}\n\tputCmd := dm.writePutCommand(&pc, key, valueBuf.Bytes())\n\tcmd := putCmd.Command(ctx)\n\n\terr = rc.Process(ctx, cmd)\n\tif err != nil {\n\t\treturn processProtocolError(err)\n\t}\n\treturn processProtocolError(cmd.Err())\n}\n\nfunc (dm *ClusterDMap) makeGetResponse(cmd *redis.StringCmd) (*GetResponse, error) {\n\traw, err := cmd.Bytes()\n\tif err != nil {\n\t\treturn nil, processProtocolError(err)\n\t}\n\n\te := dm.newEntry()\n\te.Decode(raw)\n\treturn &GetResponse{\n\t\tentry: e,\n\t}, nil\n}\n\n// Get gets the value for the given key. It returns ErrKeyNotFound if the DB\n// does not contain the key. It's thread-safe. It is safe to modify the contents\n// of the returned value. 
See GetResponse for the details.\nfunc (dm *ClusterDMap) Get(ctx context.Context, key string) (*GetResponse, error) {\n\tcmd := protocol.NewGet(dm.name, key).SetRaw().Command(ctx)\n\trc, err := dm.clusterClient.smartPick(dm.name, key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = rc.Process(ctx, cmd)\n\tif err != nil {\n\t\treturn nil, processProtocolError(err)\n\t}\n\treturn dm.makeGetResponse(cmd)\n}\n\n// Delete deletes values for the given keys. Delete will not return an error if the key doesn't exist.\n// It's thread-safe. It is safe to modify the contents of the argument after Delete returns.\nfunc (dm *ClusterDMap) Delete(ctx context.Context, keys ...string) (int, error) {\n\trc, err := dm.client.Pick()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tcmd := protocol.NewDel(dm.name, keys...).Command(ctx)\n\terr = rc.Process(ctx, cmd)\n\tif err != nil {\n\t\treturn 0, processProtocolError(err)\n\t}\n\n\tres, err := cmd.Uint64()\n\tif err != nil {\n\t\treturn 0, processProtocolError(cmd.Err())\n\t}\n\treturn int(res), nil\n}\n\n// Incr atomically increments the key by delta. The return value is the new value\n// after being incremented or an error.\nfunc (dm *ClusterDMap) Incr(ctx context.Context, key string, delta int) (int, error) {\n\trc, err := dm.clusterClient.smartPick(dm.name, key)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tcmd := protocol.NewIncr(dm.name, key, delta).Command(ctx)\n\terr = rc.Process(ctx, cmd)\n\tif err != nil {\n\t\treturn 0, processProtocolError(err)\n\t}\n\tres, err := cmd.Uint64()\n\tif err != nil {\n\t\treturn 0, processProtocolError(cmd.Err())\n\t}\n\treturn int(res), nil\n}\n\n// Decr atomically decrements the key by delta. 
The return value is the new value\n// after being decremented or an error.\nfunc (dm *ClusterDMap) Decr(ctx context.Context, key string, delta int) (int, error) {\n\trc, err := dm.clusterClient.smartPick(dm.name, key)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tcmd := protocol.NewDecr(dm.name, key, delta).Command(ctx)\n\terr = rc.Process(ctx, cmd)\n\tif err != nil {\n\t\treturn 0, processProtocolError(err)\n\t}\n\tres, err := cmd.Uint64()\n\tif err != nil {\n\t\treturn 0, processProtocolError(cmd.Err())\n\t}\n\treturn int(res), nil\n}\n\n// GetPut atomically sets the key to value and returns the old value stored at a key. It returns nil if there is no\n// previous value.\nfunc (dm *ClusterDMap) GetPut(ctx context.Context, key string, value interface{}) (*GetResponse, error) {\n\trc, err := dm.clusterClient.smartPick(dm.name, key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvalueBuf := pool.Get()\n\tdefer pool.Put(valueBuf)\n\n\tenc := resp.New(valueBuf)\n\terr = enc.Encode(value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcmd := protocol.NewGetPut(dm.name, key, valueBuf.Bytes()).SetRaw().Command(ctx)\n\terr = rc.Process(ctx, cmd)\n\terr = processProtocolError(err)\n\tif err != nil {\n\t\t// First try to set a key/value with GetPut\n\t\tif errors.Is(err, ErrKeyNotFound) {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\n\traw, err := cmd.Bytes()\n\tif err != nil {\n\t\treturn nil, processProtocolError(err)\n\t}\n\n\te := dm.newEntry()\n\te.Decode(raw)\n\treturn &GetResponse{\n\t\tentry: e,\n\t}, nil\n}\n\n// IncrByFloat atomically increments the key by delta. 
The return value is the new value\n// after being incremented or an error.\nfunc (dm *ClusterDMap) IncrByFloat(ctx context.Context, key string, delta float64) (float64, error) {\n\trc, err := dm.clusterClient.smartPick(dm.name, key)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tcmd := protocol.NewIncrByFloat(dm.name, key, delta).Command(ctx)\n\terr = rc.Process(ctx, cmd)\n\tif err != nil {\n\t\treturn 0, processProtocolError(err)\n\t}\n\tres, err := cmd.Result()\n\tif err != nil {\n\t\treturn 0, processProtocolError(cmd.Err())\n\t}\n\treturn res, nil\n}\n\n// Expire updates the expiry for the given key. It returns ErrKeyNotFound if\n// the DB does not contain the key. It's thread-safe.\nfunc (dm *ClusterDMap) Expire(ctx context.Context, key string, timeout time.Duration) error {\n\trc, err := dm.clusterClient.smartPick(dm.name, key)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd := protocol.NewExpire(dm.name, key, timeout).Command(ctx)\n\terr = rc.Process(ctx, cmd)\n\tif err != nil {\n\t\treturn processProtocolError(err)\n\t}\n\treturn processProtocolError(cmd.Err())\n}\n\n// Lock sets a lock for the given key. Acquired lock is only for the key in\n// this dmap.\n//\n// It returns immediately if it acquires the lock for the given key. 
Otherwise,\n// it waits until deadline.\n//\n// You should know that the locks are approximate and only to be used for\n// non-critical purposes.\nfunc (dm *ClusterDMap) Lock(ctx context.Context, key string, deadline time.Duration) (LockContext, error) {\n\trc, err := dm.clusterClient.smartPick(dm.name, key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcmd := protocol.NewLock(dm.name, key, deadline.Seconds()).Command(ctx)\n\terr = rc.Process(ctx, cmd)\n\tif err != nil {\n\t\treturn nil, processProtocolError(err)\n\t}\n\n\ttoken, err := cmd.Bytes()\n\tif err != nil {\n\t\treturn nil, processProtocolError(err)\n\t}\n\treturn &ClusterLockContext{\n\t\tkey:   key,\n\t\ttoken: string(token),\n\t\tdm:    dm,\n\t}, nil\n}\n\n// LockWithTimeout sets a lock for the given key. If the lock is still unreleased\n// the end of a given period of time, it automatically releases the lock.\n// Acquired lock is only for the key in this DMap.\n//\n// It returns immediately if it acquires the lock for the given key. 
Otherwise,\n// it waits until deadline.\n//\n// You should know that the locks are approximate and only to be used for\n// non-critical purposes.\nfunc (dm *ClusterDMap) LockWithTimeout(ctx context.Context, key string, timeout, deadline time.Duration) (LockContext, error) {\n\trc, err := dm.clusterClient.smartPick(dm.name, key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcmd := protocol.NewLock(dm.name, key, deadline.Seconds()).SetPX(timeout.Milliseconds()).Command(ctx)\n\terr = rc.Process(ctx, cmd)\n\tif err != nil {\n\t\treturn nil, processProtocolError(err)\n\t}\n\n\ttoken, err := cmd.Bytes()\n\tif err != nil {\n\t\treturn nil, processProtocolError(err)\n\t}\n\n\treturn &ClusterLockContext{\n\t\tkey:   key,\n\t\ttoken: string(token),\n\t\tdm:    dm,\n\t}, nil\n}\n\n// Close stops background routines and frees allocated resources.\nfunc (dm *ClusterDMap) Close(_ context.Context) error {\n\treturn nil\n}\n\n// Unlock releases the distributed lock associated with the current context by using the provided context for execution.\nfunc (c *ClusterLockContext) Unlock(ctx context.Context) error {\n\trc, err := c.dm.clusterClient.smartPick(c.dm.name, c.key)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcmd := protocol.NewUnlock(c.dm.name, c.key, c.token).Command(ctx)\n\terr = rc.Process(ctx, cmd)\n\tif err != nil {\n\t\treturn processProtocolError(err)\n\t}\n\treturn processProtocolError(cmd.Err())\n}\n\n// Lease extends the lease of the distributed lock associated with the context for the specified duration.\nfunc (c *ClusterLockContext) Lease(ctx context.Context, duration time.Duration) error {\n\trc, err := c.dm.clusterClient.smartPick(c.dm.name, c.key)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcmd := protocol.NewLockLease(c.dm.name, c.key, c.token, duration.Seconds()).Command(ctx)\n\terr = rc.Process(ctx, cmd)\n\tif err != nil {\n\t\treturn processProtocolError(err)\n\t}\n\treturn processProtocolError(cmd.Err())\n}\n\n// Scan returns an iterator to loop over the 
keys.\n//\n// Available scan options:\n//\n// * Count\n// * Match\nfunc (dm *ClusterDMap) Scan(ctx context.Context, options ...ScanOption) (Iterator, error) {\n\tvar sc dmap.ScanConfig\n\tfor _, opt := range options {\n\t\topt(&sc)\n\t}\n\tif sc.Count == 0 {\n\t\tsc.Count = DefaultScanCount\n\t}\n\n\tictx, cancel := context.WithCancel(ctx)\n\ti := &ClusterIterator{\n\t\tdm:            dm,\n\t\tclusterClient: dm.clusterClient,\n\t\tconfig:        &sc,\n\t\tlogger:        dm.clusterClient.logger,\n\t\tpartitionKeys: make(map[string]struct{}),\n\t\tcursors:       make(map[uint64]map[string]*currentCursor),\n\t\tctx:           ictx,\n\t\tcancel:        cancel,\n\t}\n\n\t// Embedded iterator uses a slightly different scan function.\n\ti.scanner = i.scanOnOwners\n\n\tif err := i.fetchRoutingTable(); err != nil {\n\t\treturn nil, err\n\t}\n\t// Load the route for the first partition (0) to scan.\n\ti.loadRoute()\n\n\ti.wg.Add(1)\n\tgo i.fetchRoutingTablePeriodically()\n\n\treturn i, nil\n}\n\n// Destroy flushes the given DMap on the cluster. You should know that there\n// is no global lock on DMaps. So if you call Put/PutEx and Destroy methods\n// concurrently on the cluster, Put call may set new values to the DMap.\nfunc (dm *ClusterDMap) Destroy(ctx context.Context) error {\n\trc, err := dm.client.Pick()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd := protocol.NewDestroy(dm.name).Command(ctx)\n\terr = rc.Process(ctx, cmd)\n\tif err != nil {\n\t\treturn processProtocolError(err)\n\t}\n\n\treturn processProtocolError(cmd.Err())\n}\n\n// ClusterClient is a client for managing and interacting with a distributed cluster of nodes.\ntype ClusterClient struct {\n\tclient         *server.Client\n\tconfig         *clusterClientConfig\n\tlogger         *log.Logger\n\troutingTable   atomic.Value\n\tpartitionCount uint64\n\twg             sync.WaitGroup\n\tctx            context.Context\n\tcancel         context.CancelFunc\n}\n\n// Ping sends a ping message to an Olric node. 
Returns PONG if a message is empty,\n// otherwise return a copy of the message as bulk. This command is often used to test\n// if a connection is still alive or to measure latency.\nfunc (cl *ClusterClient) Ping(ctx context.Context, addr, message string) (string, error) {\n\tpingCmd := protocol.NewPing()\n\tif message != \"\" {\n\t\tpingCmd.SetMessage(message)\n\t}\n\tcmd := pingCmd.Command(ctx)\n\n\trc := cl.client.Get(addr)\n\terr := rc.Process(ctx, cmd)\n\tif err != nil {\n\t\treturn \"\", processProtocolError(err)\n\t}\n\terr = processProtocolError(cmd.Err())\n\tif err != nil {\n\t\treturn \"\", nil\n\t}\n\n\treturn cmd.Result()\n}\n\n// RoutingTable returns the latest version of the routing table.\nfunc (cl *ClusterClient) RoutingTable(ctx context.Context) (RoutingTable, error) {\n\tcmd := protocol.NewClusterRoutingTable().Command(ctx)\n\trc, err := cl.client.Pick()\n\tif err != nil {\n\t\treturn RoutingTable{}, err\n\t}\n\n\terr = rc.Process(ctx, cmd)\n\tif err != nil {\n\t\treturn RoutingTable{}, processProtocolError(err)\n\t}\n\n\tif err = cmd.Err(); err != nil {\n\t\treturn RoutingTable{}, processProtocolError(err)\n\t}\n\n\tresult, err := cmd.Slice()\n\tif err != nil {\n\t\treturn RoutingTable{}, processProtocolError(err)\n\n\t}\n\treturn mapToRoutingTable(result)\n}\n\n// Stats returns stats.Stats with the given options.\nfunc (cl *ClusterClient) Stats(ctx context.Context, address string, options ...StatsOption) (stats.Stats, error) {\n\tvar cfg statsConfig\n\tfor _, opt := range options {\n\t\topt(&cfg)\n\t}\n\n\tstatsCmd := protocol.NewStats()\n\tif cfg.CollectRuntime {\n\t\tstatsCmd.SetCollectRuntime()\n\t}\n\n\tcmd := statsCmd.Command(ctx)\n\trc := cl.client.Get(address)\n\n\terr := rc.Process(ctx, cmd)\n\tif err != nil {\n\t\treturn stats.Stats{}, processProtocolError(err)\n\t}\n\n\tif err = cmd.Err(); err != nil {\n\t\treturn stats.Stats{}, processProtocolError(err)\n\t}\n\tdata, err := cmd.Bytes()\n\tif err != nil {\n\t\treturn stats.Stats{}, 
processProtocolError(err)\n\t}\n\tvar s stats.Stats\n\terr = json.Unmarshal(data, &s)\n\tif err != nil {\n\t\treturn stats.Stats{}, processProtocolError(err)\n\t}\n\treturn s, nil\n}\n\n// Members returns a thread-safe list of cluster members.\nfunc (cl *ClusterClient) Members(ctx context.Context) ([]Member, error) {\n\trc, err := cl.client.Pick()\n\tif err != nil {\n\t\treturn []Member{}, err\n\t}\n\n\tcmd := protocol.NewClusterMembers().Command(ctx)\n\terr = rc.Process(ctx, cmd)\n\tif err != nil {\n\t\treturn []Member{}, processProtocolError(err)\n\t}\n\n\tif err = cmd.Err(); err != nil {\n\t\treturn []Member{}, processProtocolError(err)\n\t}\n\n\titems, err := cmd.Slice()\n\tif err != nil {\n\t\treturn []Member{}, processProtocolError(err)\n\t}\n\tvar members []Member\n\tfor _, rawItem := range items {\n\t\tm := Member{}\n\t\titem := rawItem.([]interface{})\n\t\tm.Name = item[0].(string)\n\t\tm.Birthdate = item[1].(int64)\n\n\t\t// go-redis/redis package cannot handle uint64 type. At the time of this writing,\n\t\t// there is no solution for this, and I don't want to use a soft fork to repair it.\n\t\tm.ID = discovery.MemberID(m.Name, m.Birthdate)\n\n\t\tif item[2] == \"true\" {\n\t\t\tm.Coordinator = true\n\t\t}\n\t\tmembers = append(members, m)\n\t}\n\treturn members, nil\n}\n\n// RefreshMetadata fetches a list of available members and the latest routing\n// table version. 
It also closes stale clients if there are any.\nfunc (cl *ClusterClient) RefreshMetadata(ctx context.Context) error {\n\t// Fetch a list of currently available cluster members.\n\tvar members []Member\n\tvar err error\n\tfor {\n\t\tmembers, err = cl.Members(ctx)\n\t\tif errors.Is(err, ErrConnRefused) {\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbreak\n\t}\n\t// Use a map for fast access.\n\taddresses := make(map[string]struct{})\n\tfor _, member := range members {\n\t\taddresses[member.Name] = struct{}{}\n\t}\n\n\t// Clean stale client connections\n\tfor addr := range cl.client.Addresses() {\n\t\tif _, ok := addresses[addr]; !ok {\n\t\t\t// Gone\n\t\t\tif err := cl.client.Close(addr); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// Re-fetch the routing table, we should use the latest routing table version.\n\treturn cl.fetchRoutingTable()\n}\n\n// Close stops background routines and frees allocated resources.\nfunc (cl *ClusterClient) Close(ctx context.Context) error {\n\tselect {\n\tcase <-cl.ctx.Done():\n\t\treturn nil\n\tdefault:\n\t}\n\n\tcl.cancel()\n\n\t// Wait for the background workers:\n\t// * fetchRoutingTablePeriodically\n\tcl.wg.Wait()\n\n\t// Close the underlying TCP sockets gracefully.\n\treturn cl.client.Shutdown(ctx)\n}\n\n// NewPubSub returns a new PubSub client with the given options.\nfunc (cl *ClusterClient) NewPubSub(options ...PubSubOption) (*PubSub, error) {\n\treturn newPubSub(cl.client, options...)\n}\n\n// NewDMap returns a new DMap client with the given options.\nfunc (cl *ClusterClient) NewDMap(name string, options ...DMapOption) (DMap, error) {\n\tvar dc dmapConfig\n\tfor _, opt := range options {\n\t\topt(&dc)\n\t}\n\n\tif dc.storageEntryImplementation == nil {\n\t\tdc.storageEntryImplementation = func() storage.Entry {\n\t\t\treturn entry.New()\n\t\t}\n\t}\n\n\treturn &ClusterDMap{name: name,\n\t\tconfig:        &dc,\n\t\tnewEntry:      dc.storageEntryImplementation,\n\t\tclient:        
cl.client,\n\t\tclusterClient: cl,\n\t}, nil\n}\n\n// ClusterClientOption is a functional option for configuring a clusterClientConfig instance.\ntype ClusterClientOption func(c *clusterClientConfig)\n\n// clusterClientConfig holds the configuration required to initialize and manage a cluster client instance.\ntype clusterClientConfig struct {\n\tlogger                    *log.Logger\n\tconfig                    *config.Client\n\tauthentication            *config.Authentication\n\thasher                    hasher.Hasher\n\troutingTableFetchInterval time.Duration\n}\n\n// WithHasher sets a custom hasher implementation to the cluster client configuration.\nfunc WithHasher(h hasher.Hasher) ClusterClientOption {\n\treturn func(cfg *clusterClientConfig) {\n\t\tcfg.hasher = h\n\t}\n}\n\n// WithLogger sets a custom logger for the cluster client configuration.\nfunc WithLogger(l *log.Logger) ClusterClientOption {\n\treturn func(cfg *clusterClientConfig) {\n\t\tcfg.logger = l\n\t}\n}\n\n// WithConfig applies a specified config.Client to the clusterClientConfig.\nfunc WithConfig(c *config.Client) ClusterClientOption {\n\treturn func(cfg *clusterClientConfig) {\n\t\tcfg.config = c\n\t}\n}\n\n// WithPassword configures a cluster client with the specified password for authentication.\nfunc WithPassword(password string) ClusterClientOption {\n\treturn func(cfg *clusterClientConfig) {\n\t\tcfg.authentication = &config.Authentication{\n\t\t\tPassword: password,\n\t\t}\n\t}\n}\n\n// WithRoutingTableFetchInterval sets the interval for periodic fetching of the routing table in a cluster client configuration.\nfunc WithRoutingTableFetchInterval(interval time.Duration) ClusterClientOption {\n\treturn func(cfg *clusterClientConfig) {\n\t\tcfg.routingTableFetchInterval = interval\n\t}\n}\n\n// fetchRoutingTable updates the cluster routing table by fetching the latest version from the cluster.\n// It initializes the partition count if it's the first invocation. 
Returns an error if fetching fails.\nfunc (cl *ClusterClient) fetchRoutingTable() error {\n\tctx, cancel := context.WithCancel(cl.ctx)\n\tdefer cancel()\n\n\troutingTable, err := cl.RoutingTable(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error while loading the routing table: %w\", err)\n\t}\n\n\tprevious := cl.routingTable.Load()\n\tif previous == nil {\n\t\t// First run. Partition count is a constant, actually. It has to be greater than zero.\n\t\tcl.partitionCount = uint64(len(routingTable))\n\t}\n\tcl.routingTable.Store(routingTable)\n\treturn nil\n}\n\n// fetchRoutingTablePeriodically periodically updates the routing table by invoking fetchRoutingTable at configured intervals.\n// It stops gracefully when the context is canceled or an error occurs.\nfunc (cl *ClusterClient) fetchRoutingTablePeriodically() {\n\tdefer cl.wg.Done()\n\n\tticker := time.NewTicker(cl.config.routingTableFetchInterval)\n\tdefer ticker.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-cl.ctx.Done():\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\terr := cl.fetchRoutingTable()\n\t\t\tif err != nil {\n\t\t\t\tcl.logger.Printf(\"[ERROR] Failed to fetch the latest version of the routing table: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// NewClusterClient creates a new Client instance. 
It needs one node address at least to discover the whole cluster.\nfunc NewClusterClient(addresses []string, options ...ClusterClientOption) (*ClusterClient, error) {\n\tif len(addresses) == 0 {\n\t\treturn nil, fmt.Errorf(\"addresses cannot be empty\")\n\t}\n\n\tvar cc clusterClientConfig\n\tfor _, opt := range options {\n\t\topt(&cc)\n\t}\n\n\tif cc.hasher == nil {\n\t\tcc.hasher = hasher.NewDefaultHasher()\n\t}\n\n\tif cc.logger == nil {\n\t\tcc.logger = log.New(os.Stderr, \"logger: \", log.Lshortfile)\n\t}\n\n\tif cc.config == nil {\n\t\tcc.config = config.NewClient()\n\t}\n\n\tif cc.authentication != nil {\n\t\tcc.config.Authentication = cc.authentication\n\t}\n\n\tif cc.routingTableFetchInterval <= 0 {\n\t\tcc.routingTableFetchInterval = DefaultRoutingTableFetchInterval\n\t}\n\n\tif err := cc.config.Sanitize(); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := cc.config.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tcl := &ClusterClient{\n\t\tclient: server.NewClient(cc.config),\n\t\tconfig: &cc,\n\t\tlogger: cc.logger,\n\t\tctx:    ctx,\n\t\tcancel: cancel,\n\t}\n\n\t// Initialize clients for the given cluster members.\n\tfor _, address := range addresses {\n\t\tcl.client.Get(address)\n\t}\n\n\t// Discover all cluster members\n\tmembers, err := cl.Members(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error while discovering the cluster members: %w\", err)\n\t}\n\tfor _, member := range members {\n\t\tcl.client.Get(member.Name)\n\t}\n\n\t// Hash function is required to target primary owners instead of random cluster members.\n\tpartitions.SetHashFunc(cc.hasher)\n\n\t// Initial fetch. 
ClusterClient targets the primary owners for a smooth and quick operation.\n\tif err := cl.fetchRoutingTable(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Refresh the routing table periodically (default interval: DefaultRoutingTableFetchInterval).\n\tcl.wg.Add(1)\n\tgo cl.fetchRoutingTablePeriodically()\n\n\treturn cl, nil\n}\n\nvar (\n\t_ Client = (*ClusterClient)(nil)\n\t_ DMap   = (*ClusterDMap)(nil)\n)\n"
  },
  {
    "path": "cluster_client_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage olric\n\nimport (\n\t\"context\"\n\t\"log\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/config\"\n\t\"github.com/olric-data/olric/hasher\"\n\t\"github.com/olric-data/olric/internal/testutil\"\n\t\"github.com/olric-data/olric/stats\"\n\t\"github.com/stretchr/testify/require\"\n\t\"golang.org/x/sync/errgroup\"\n)\n\nfunc TestClusterClient_Ping(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tcluster.addMember(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tresponse, err := c.Ping(ctx, db.rt.This().String(), \"\")\n\trequire.NoError(t, err)\n\trequire.Equal(t, DefaultPingResponse, response)\n}\n\nfunc TestClusterClient_Ping_WithMessage(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tcluster.addMember(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tmessage := \"Olric is the best!\"\n\tresult, err := c.Ping(ctx, db.rt.This().String(), message)\n\trequire.NoError(t, err)\n\trequire.Equal(t, message, result)\n}\n\nfunc TestClusterClient_RoutingTable(t *testing.T) {\n\tcluster := 
newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\trt, err := c.RoutingTable(ctx)\n\trequire.NoError(t, err)\n\n\trequire.Len(t, rt, int(db.config.PartitionCount))\n}\n\nfunc TestClusterClient_RoutingTable_Cluster(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tcluster.addMember(t) // Cluster coordinator\n\t<-time.After(250 * time.Millisecond)\n\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\trt, err := c.RoutingTable(ctx)\n\trequire.NoError(t, err)\n\n\trequire.Len(t, rt, int(db.config.PartitionCount))\n}\n\nfunc TestClusterClient_Put(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\terr = dm.Put(ctx, \"mykey\", \"myvalue\")\n\trequire.NoError(t, err)\n}\n\nfunc TestClusterClient_Get(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\terr = dm.Put(ctx, \"mykey\", \"myvalue\")\n\trequire.NoError(t, err)\n\n\tgr, err := dm.Get(ctx, \"mykey\")\n\trequire.NoError(t, err)\n\n\tres, err := gr.String()\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, res, \"myvalue\")\n}\n\nfunc TestClusterClient_Delete(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx 
:= context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\terr = dm.Put(ctx, \"mykey\", \"myvalue\")\n\trequire.NoError(t, err)\n\n\tcount, err := dm.Delete(ctx, \"mykey\")\n\trequire.NoError(t, err)\n\trequire.Equal(t, 1, count)\n\n\t_, err = dm.Get(ctx, \"mykey\")\n\trequire.ErrorIs(t, err, ErrKeyNotFound)\n}\n\nfunc TestClusterClient_Delete_Many_Keys(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tvar keys []string\n\tfor i := 0; i < 10; i++ {\n\t\tkey := testutil.ToKey(i)\n\t\terr = dm.Put(context.Background(), key, \"myvalue\")\n\t\trequire.NoError(t, err)\n\t\tkeys = append(keys, key)\n\t}\n\n\tcount, err := dm.Delete(context.Background(), keys...)\n\trequire.NoError(t, err)\n\trequire.Equal(t, 10, count)\n}\n\nfunc TestClusterClient_Destroy(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\terr = dm.Put(ctx, \"mykey\", \"myvalue\")\n\trequire.NoError(t, err)\n\n\terr = dm.Destroy(ctx)\n\trequire.NoError(t, err)\n\n\t_, err = dm.Get(ctx, \"mykey\")\n\trequire.ErrorIs(t, err, ErrKeyNotFound)\n}\n\nfunc TestClusterClient_Incr(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() 
{\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tvar errGr errgroup.Group\n\tfor i := 0; i < 10; i++ {\n\t\terrGr.Go(func() error {\n\t\t\t_, err = dm.Incr(ctx, \"mykey\", 1)\n\t\t\treturn err\n\t\t})\n\t}\n\n\trequire.NoError(t, errGr.Wait())\n\n\tresult, err := dm.Incr(ctx, \"mykey\", 1)\n\trequire.NoError(t, err)\n\trequire.Equal(t, 11, result)\n}\n\nfunc TestClusterClient_IncrByFloat(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tvar errGr errgroup.Group\n\tfor i := 0; i < 10; i++ {\n\t\terrGr.Go(func() error {\n\t\t\t_, err = dm.IncrByFloat(ctx, \"mykey\", 1.2)\n\t\t\treturn err\n\t\t})\n\t}\n\n\trequire.NoError(t, errGr.Wait())\n\n\tresult, err := dm.IncrByFloat(ctx, \"mykey\", 1.2)\n\trequire.NoError(t, err)\n\trequire.Equal(t, 13.199999999999998, result)\n}\n\nfunc TestClusterClient_Decr(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\terr = dm.Put(ctx, \"mykey\", 11)\n\trequire.NoError(t, err)\n\n\tvar errGr errgroup.Group\n\tfor i := 0; i < 10; i++ {\n\t\terrGr.Go(func() error {\n\t\t\t_, err = dm.Decr(ctx, \"mykey\", 1)\n\t\t\treturn err\n\t\t})\n\t}\n\n\trequire.NoError(t, errGr.Wait())\n\n\tresult, err := dm.Decr(ctx, \"mykey\", 1)\n\trequire.NoError(t, err)\n\trequire.Equal(t, 0, result)\n}\n\nfunc TestClusterClient_GetPut(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := 
context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tgr, err := dm.GetPut(ctx, \"mykey\", \"myvalue\")\n\trequire.NoError(t, err)\n\trequire.Nil(t, gr)\n\n\tgr, err = dm.GetPut(ctx, \"mykey\", \"myvalue-2\")\n\trequire.NoError(t, err)\n\n\tvalue, err := gr.String()\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"myvalue\", value)\n}\n\nfunc TestClusterClient_Expire(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\terr = dm.Put(ctx, \"mykey\", \"myvalue\")\n\trequire.NoError(t, err)\n\n\terr = dm.Expire(ctx, \"mykey\", time.Millisecond)\n\trequire.NoError(t, err)\n\n\t<-time.After(time.Millisecond)\n\n\t_, err = dm.Get(ctx, \"mykey\")\n\trequire.ErrorIs(t, err, ErrKeyNotFound)\n}\n\nfunc TestClusterClient_Lock_Unlock(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tlx, err := dm.Lock(ctx, \"lock.foo.key\", time.Second)\n\trequire.NoError(t, err)\n\n\terr = lx.Unlock(ctx)\n\trequire.NoError(t, err)\n}\n\nfunc TestClusterClient_Lock_Lease(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, 
err)\n\n\tlx, err := dm.Lock(ctx, \"lock.foo.key\", time.Second)\n\trequire.NoError(t, err)\n\n\terr = lx.Lease(ctx, time.Millisecond)\n\trequire.NoError(t, err)\n\n\t<-time.After(time.Millisecond)\n\n\terr = lx.Unlock(ctx)\n\trequire.ErrorIs(t, err, ErrNoSuchLock)\n}\n\nfunc TestClusterClient_Lock_ErrLockNotAcquired(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\t_, err = dm.Lock(ctx, \"lock.foo.key\", time.Second)\n\trequire.NoError(t, err)\n\n\t_, err = dm.Lock(ctx, \"lock.foo.key\", time.Millisecond)\n\trequire.ErrorIs(t, err, ErrLockNotAcquired)\n}\n\nfunc TestClusterClient_LockWithTimeout(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tlx, err := dm.LockWithTimeout(ctx, \"lock.foo.key\", time.Hour, time.Second)\n\trequire.NoError(t, err)\n\n\terr = lx.Unlock(ctx)\n\trequire.NoError(t, err)\n}\n\nfunc TestClusterClient_LockWithTimeout_ErrNoSuchLock(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tlx, err := dm.LockWithTimeout(ctx, \"lock.foo.key\", time.Millisecond, time.Second)\n\trequire.NoError(t, err)\n\n\t<-time.After(time.Millisecond)\n\n\terr = lx.Unlock(ctx)\n\trequire.ErrorIs(t, err, ErrNoSuchLock)\n}\n\nfunc 
TestClusterClient_LockWithTimeout_Then_Lease(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tlx, err := dm.LockWithTimeout(ctx, \"lock.foo.key\", 50*time.Millisecond, time.Second)\n\trequire.NoError(t, err)\n\n\t// Expand its timeout value\n\terr = lx.Lease(ctx, time.Hour)\n\trequire.NoError(t, err)\n\n\t<-time.After(100 * time.Millisecond)\n\n\t_, err = dm.Lock(ctx, \"lock.foo.key\", time.Millisecond)\n\trequire.ErrorIs(t, err, ErrLockNotAcquired)\n}\n\nfunc TestClusterClient_LockWithTimeout_ErrLockNotAcquired(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\t_, err = dm.LockWithTimeout(ctx, \"lock.foo.key\", time.Hour, time.Second)\n\trequire.NoError(t, err)\n\n\t_, err = dm.Lock(ctx, \"lock.foo.key\", time.Millisecond)\n\trequire.ErrorIs(t, err, ErrLockNotAcquired)\n}\n\nfunc TestClusterClient_Put_Ex(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\terr = dm.Put(ctx, \"mykey\", \"myvalue\", EX(time.Second))\n\trequire.NoError(t, err)\n\n\t<-time.After(time.Second)\n\n\t_, err = dm.Get(ctx, \"mykey\")\n\trequire.ErrorIs(t, err, ErrKeyNotFound)\n}\n\nfunc TestClusterClient_Put_PX(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := 
cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\terr = dm.Put(ctx, \"mykey\", \"myvalue\", PX(time.Millisecond))\n\trequire.NoError(t, err)\n\n\t<-time.After(time.Millisecond)\n\n\t_, err = dm.Get(ctx, \"mykey\")\n\trequire.ErrorIs(t, err, ErrKeyNotFound)\n}\n\nfunc TestClusterClient_Put_EXAT(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\terr = dm.Put(ctx, \"mykey\", \"myvalue\", EXAT(time.Duration(time.Now().Add(time.Second).UnixNano())))\n\trequire.NoError(t, err)\n\n\t<-time.After(time.Second)\n\n\t_, err = dm.Get(ctx, \"mykey\")\n\trequire.ErrorIs(t, err, ErrKeyNotFound)\n}\n\nfunc TestClusterClient_Put_PXAT(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\terr = dm.Put(ctx, \"mykey\", \"myvalue\", PXAT(time.Duration(time.Now().Add(time.Millisecond).UnixNano())))\n\trequire.NoError(t, err)\n\n\t<-time.After(time.Millisecond)\n\n\t_, err = dm.Get(ctx, \"mykey\")\n\trequire.ErrorIs(t, err, ErrKeyNotFound)\n}\n\nfunc TestClusterClient_Put_NX(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := 
c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\terr = dm.Put(ctx, \"mykey\", \"myvalue\")\n\trequire.NoError(t, err)\n\n\terr = dm.Put(ctx, \"mykey\", \"myvalue-2\", NX())\n\trequire.ErrorIs(t, err, ErrKeyFound)\n\n\tgr, err := dm.Get(ctx, \"mykey\")\n\trequire.NoError(t, err)\n\n\tvalue, err := gr.String()\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"myvalue\", value)\n}\n\nfunc TestClusterClient_Put_XX(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\terr = dm.Put(ctx, \"mykey\", \"myvalue-2\", XX())\n\trequire.ErrorIs(t, err, ErrKeyNotFound)\n}\n\nfunc TestClusterClient_Stats(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tvar empty stats.Stats\n\ts, err := c.Stats(ctx, db.rt.This().String())\n\trequire.NoError(t, err)\n\trequire.Nil(t, s.Runtime)\n\trequire.NotEqual(t, empty, s)\n}\n\nfunc TestClusterClient_Stats_Cluster(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\tdb2 := cluster.addMember(t)\n\n\t<-time.After(250 * time.Millisecond)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tvar empty stats.Stats\n\ts, err := c.Stats(ctx, db2.rt.This().String())\n\trequire.NoError(t, err)\n\trequire.Nil(t, s.Runtime)\n\trequire.NotEqual(t, empty, s)\n\trequire.Equal(t, db2.rt.This().String(), s.Member.String())\n}\n\nfunc TestClusterClient_Stats_CollectRuntime(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb 
:= cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tvar empty stats.Stats\n\ts, err := c.Stats(ctx, db.rt.This().String(), CollectRuntime())\n\trequire.NoError(t, err)\n\trequire.NotNil(t, s.Runtime)\n\trequire.NotEqual(t, empty, s)\n}\n\nfunc TestClusterClient_Set_Options(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\n\tlg := log.New(os.Stderr, \"logger: \", log.Lshortfile)\n\tcfg := config.NewClient()\n\tc, err := NewClusterClient([]string{db.name}, WithConfig(cfg), WithLogger(lg))\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\trequire.Equal(t, cfg, c.config.config)\n\trequire.Equal(t, lg, c.config.logger)\n}\n\nfunc TestClusterClient_Members(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tcluster.addMember(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tmembers, err := c.Members(ctx)\n\trequire.NoError(t, err)\n\trequire.Len(t, members, 2)\n\n\tcoordinator := db.rt.Discovery().GetCoordinator()\n\tfor _, member := range members {\n\t\trequire.NotEqual(t, \"\", member.Name)\n\t\trequire.NotEqual(t, 0, member.ID)\n\t\trequire.NotEqual(t, 0, member.Birthdate)\n\t\tif coordinator.ID == member.ID {\n\t\t\trequire.True(t, member.Coordinator)\n\t\t} else {\n\t\t\trequire.False(t, member.Coordinator)\n\t\t}\n\t}\n}\n\nfunc TestClusterClient_smartPick(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb1 := cluster.addMember(t)\n\tdb2 := cluster.addMember(t)\n\tdb3 := cluster.addMember(t)\n\tdb4 := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient(\n\t\t[]string{db1.name, db2.name, db3.name, 
db4.name},\n\t\tWithHasher(hasher.NewDefaultHasher()),\n\t)\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tclients := make(map[string]struct{})\n\tfor i := 0; i < 1000; i++ {\n\t\trc, err := c.smartPick(\"mydmap\", testutil.ToKey(i))\n\t\trequire.NoError(t, err)\n\t\tclients[rc.String()] = struct{}{}\n\t}\n\trequire.Len(t, clients, 4)\n}\n"
  },
  {
    "path": "cluster_iterator.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage olric\n\nimport (\n\t\"context\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/dmap\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n)\n\ntype currentCursor struct {\n\tprimary uint64\n\treplica uint64\n}\n\n// ClusterIterator implements distributed query on DMaps.\ntype ClusterIterator struct {\n\tmtx             sync.Mutex // protects pos and page\n\troutingTableMtx sync.Mutex // protects routingTable and partitionCount\n\n\tlogger         *log.Logger\n\tdm             *ClusterDMap\n\tclusterClient  *ClusterClient\n\tpos            int\n\tpage           []string\n\troute          *Route\n\tpartitionKeys  map[string]struct{}\n\tcursors        map[uint64]map[string]*currentCursor\n\tpartID         uint64 // current partition id\n\troutingTable   RoutingTable\n\tpartitionCount uint64\n\tconfig         *dmap.ScanConfig\n\tscanner        func() error\n\twg             sync.WaitGroup\n\tctx            context.Context\n\tcancel         context.CancelFunc\n}\n\nfunc (i *ClusterIterator) loadRoute() {\n\ti.routingTableMtx.Lock()\n\tdefer i.routingTableMtx.Unlock()\n\n\troute, ok := i.routingTable[i.partID]\n\tif !ok {\n\t\tpanic(\"partID: could not be found in the routing table\")\n\t}\n\ti.route = &route\n}\n\nfunc (i *ClusterIterator) updateCursor(owner string, cursor uint64) {\n\tif _, ok := 
i.cursors[i.partID]; !ok {\n\t\ti.cursors[i.partID] = make(map[string]*currentCursor)\n\t}\n\tcc, ok := i.cursors[i.partID][owner]\n\tif !ok {\n\t\tcc = &currentCursor{}\n\t\tif i.config.Replica {\n\t\t\tcc.replica = cursor\n\t\t} else {\n\t\t\tcc.primary = cursor\n\t\t}\n\t\ti.cursors[i.partID][owner] = cc\n\t\treturn\n\t}\n\n\tif i.config.Replica {\n\t\tcc.replica = cursor\n\t} else {\n\t\tcc.primary = cursor\n\t}\n\ti.cursors[i.partID][owner] = cc\n}\n\nfunc (i *ClusterIterator) loadCursor(owner string) uint64 {\n\tif _, ok := i.cursors[i.partID]; !ok {\n\t\treturn 0\n\t}\n\tcc, ok := i.cursors[i.partID][owner]\n\tif !ok {\n\t\treturn 0\n\t}\n\tif i.config.Replica {\n\t\treturn cc.replica\n\t}\n\treturn cc.primary\n}\n\nfunc (i *ClusterIterator) updateIterator(keys []string, cursor uint64, owner string) {\n\tfor _, key := range keys {\n\t\tif _, ok := i.partitionKeys[key]; !ok {\n\t\t\ti.page = append(i.page, key)\n\t\t\ti.partitionKeys[key] = struct{}{}\n\t\t}\n\t}\n\ti.updateCursor(owner, cursor)\n}\n\nfunc (i *ClusterIterator) getOwners() []string {\n\tvar raw []string\n\tif i.config.Replica {\n\t\traw = i.routingTable[i.partID].ReplicaOwners\n\t} else {\n\t\traw = i.routingTable[i.partID].PrimaryOwners\n\t}\n\tvar owners []string\n\t// Make a safe copy of the raw.\n\tfor _, owner := range raw {\n\t\towners = append(owners, owner)\n\t}\n\treturn owners\n}\n\nfunc (i *ClusterIterator) removeScannedOwner(idx int) {\n\tif i.config.Replica {\n\t\tif len(i.route.ReplicaOwners) > 0 && len(i.route.ReplicaOwners) > idx {\n\t\t\ti.route.ReplicaOwners = append(i.route.ReplicaOwners[:idx], i.route.ReplicaOwners[idx+1:]...)\n\t\t}\n\t} else {\n\t\tif len(i.route.PrimaryOwners) > 0 && len(i.route.PrimaryOwners) > idx {\n\t\t\ti.route.PrimaryOwners = append(i.route.PrimaryOwners[:idx], i.route.PrimaryOwners[idx+1:]...)\n\t\t}\n\t}\n}\n\nfunc (i *ClusterIterator) scanOnOwners() error {\n\towners := i.getOwners()\n\n\tfor idx, owner := range owners {\n\t\tcursor := 
i.loadCursor(owner)\n\n\t\t// Build a scan command here\n\t\ts := protocol.NewScan(i.partID, i.dm.Name(), cursor)\n\t\tif i.config.HasCount {\n\t\t\ts.SetCount(i.config.Count)\n\t\t}\n\t\tif i.config.HasMatch {\n\t\t\ts.SetMatch(i.config.Match)\n\t\t}\n\t\tif i.config.Replica {\n\t\t\ts.SetReplica()\n\t\t}\n\n\t\tscanCmd := s.Command(i.ctx)\n\t\t// Fetch a Redis client for the given owner.\n\t\trc := i.clusterClient.client.Get(owner)\n\t\terr := rc.Process(i.ctx, scanCmd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tkeys, newCursor, err := scanCmd.Result()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ti.updateIterator(keys, newCursor, owner)\n\t\tif newCursor == 0 {\n\t\t\ti.removeScannedOwner(idx)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (i *ClusterIterator) resetPage() {\n\tif len(i.page) != 0 {\n\t\ti.page = []string{}\n\t}\n\ti.pos = 0\n}\n\nfunc (i *ClusterIterator) fetchData() error {\n\ti.config.Replica = false\n\tif err := i.scanner(); err != nil {\n\t\treturn err\n\t}\n\n\ti.config.Replica = true\n\treturn i.scanner()\n}\n\nfunc (i *ClusterIterator) reset() {\n\ti.partitionKeys = make(map[string]struct{})\n\ti.resetPage()\n\ti.loadRoute()\n}\n\nfunc (i *ClusterIterator) next() bool {\n\tif len(i.page) != 0 {\n\t\ti.pos++\n\t\tif i.pos <= len(i.page) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\ti.resetPage()\n\n\tfor {\n\t\tif err := i.fetchData(); err != nil {\n\t\t\ti.logger.Printf(\"[ERROR] Failed to fetch data: %s\", err)\n\t\t\treturn false\n\t\t}\n\t\tif len(i.page) != 0 {\n\t\t\t// We have data on the page to read. Stop the iteration.\n\t\t\tbreak\n\t\t}\n\n\t\tif len(i.route.PrimaryOwners) == 0 && len(i.route.ReplicaOwners) == 0 {\n\t\t\t// We completed scanning all the owners. 
Stop the iteration.\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif len(i.page) == 0 && len(i.route.PrimaryOwners) == 0 && len(i.route.ReplicaOwners) == 0 {\n\t\ti.partID++\n\t\tif i.partID >= i.partitionCount {\n\t\t\treturn false\n\t\t}\n\t\ti.reset()\n\t\treturn i.next()\n\t}\n\ti.pos = 1\n\treturn true\n}\n\n// Next returns true if there is more key in the iterator implementation.\n// Otherwise, it returns false\nfunc (i *ClusterIterator) Next() bool {\n\ti.mtx.Lock()\n\tdefer i.mtx.Unlock()\n\n\tselect {\n\tcase <-i.ctx.Done():\n\t\treturn false\n\tdefault:\n\t}\n\n\treturn i.next()\n}\n\n// Key returns a key name from the distributed map.\nfunc (i *ClusterIterator) Key() string {\n\ti.mtx.Lock()\n\tdefer i.mtx.Unlock()\n\n\tvar key string\n\tif i.pos > 0 && i.pos <= len(i.page) {\n\t\tkey = i.page[i.pos-1]\n\t}\n\treturn key\n}\n\nfunc (i *ClusterIterator) fetchRoutingTablePeriodically() {\n\tdefer i.wg.Done()\n\n\tfor {\n\t\tselect {\n\t\tcase <-i.ctx.Done():\n\t\t\treturn\n\t\tcase <-time.After(time.Second):\n\t\t\tif err := i.fetchRoutingTable(); err != nil {\n\t\t\t\ti.logger.Printf(\"[ERROR] Failed to fetch the latest version of the routing table: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (i *ClusterIterator) fetchRoutingTable() error {\n\troutingTable, err := i.clusterClient.RoutingTable(i.ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ti.routingTableMtx.Lock()\n\tdefer i.routingTableMtx.Unlock()\n\n\t// Partition count is a constant, actually. It has to be greater than zero.\n\ti.partitionCount = uint64(len(routingTable))\n\ti.routingTable = routingTable\n\treturn nil\n}\n\n// Close stops the iteration and releases allocated resources.\nfunc (i *ClusterIterator) Close() {\n\tselect {\n\tcase <-i.ctx.Done():\n\t\treturn\n\tdefault:\n\t}\n\n\ti.cancel()\n\n\t// await for routing table updater\n\ti.wg.Wait()\n}\n"
  },
  {
    "path": "cluster_iterator_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage olric\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/olric-data/olric/internal/testutil\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestClusterClient_ScanMatch(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tevenKeys := make(map[string]bool)\n\tfor i := 0; i < 100; i++ {\n\t\tvar key string\n\t\tif i%2 == 0 {\n\t\t\tkey = fmt.Sprintf(\"even:%s\", testutil.ToKey(i))\n\t\t\tevenKeys[key] = false\n\t\t} else {\n\t\t\tkey = fmt.Sprintf(\"odd:%s\", testutil.ToKey(i))\n\t\t}\n\t\terr = dm.Put(ctx, key, i)\n\t\trequire.NoError(t, err)\n\t}\n\ti, err := dm.Scan(ctx, Match(\"^even:\"))\n\trequire.NoError(t, err)\n\tvar count int\n\tdefer i.Close()\n\n\tfor i.Next() {\n\t\tcount++\n\t\trequire.Contains(t, evenKeys, i.Key())\n\t}\n\trequire.Equal(t, 50, count)\n}\n\nfunc TestClusterClient_Scan(t *testing.T) {\n\tcl := newTestOlricCluster(t)\n\tdb := cl.addMember(t)\n\tcl.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, 
c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tallKeys := make(map[string]bool)\n\tfor i := 0; i < 100; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), i)\n\t\trequire.NoError(t, err)\n\t\tallKeys[testutil.ToKey(i)] = false\n\t}\n\n\ti, err := dm.Scan(ctx)\n\trequire.NoError(t, err)\n\n\tvar count int\n\tdefer i.Close()\n\n\tfor i.Next() {\n\t\tcount++\n\t\trequire.Contains(t, allKeys, i.Key())\n\t}\n\trequire.Equal(t, 100, count)\n}\n"
  },
  {
    "path": "cluster_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage olric\n\nimport (\n\t\"context\"\n\t\"testing\"\n\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestOlric_ClusterRoutingTable_clusterRoutingTableCommandHandler(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\trtCmd := protocol.NewClusterRoutingTable().Command(db.ctx)\n\trc := db.client.Get(db.rt.This().String())\n\terr := rc.Process(db.ctx, rtCmd)\n\trequire.NoError(t, err)\n\tslice, err := rtCmd.Slice()\n\trequire.NoError(t, err)\n\n\trt, err := mapToRoutingTable(slice)\n\trequire.NoError(t, err)\n\trequire.Len(t, rt, int(db.config.PartitionCount))\n\tfor _, route := range rt {\n\t\trequire.Len(t, route.PrimaryOwners, 1)\n\t\trequire.Equal(t, db.rt.This().String(), route.PrimaryOwners[0])\n\t\trequire.Len(t, route.ReplicaOwners, 0)\n\t}\n}\n\nfunc TestOlric_RoutingTable_Standalone(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\trt, err := db.routingTable(context.Background())\n\trequire.NoError(t, err)\n\trequire.Len(t, rt, int(db.config.PartitionCount))\n\tfor _, route := range rt {\n\t\trequire.Len(t, route.PrimaryOwners, 1)\n\t\trequire.Equal(t, db.rt.This().String(), route.PrimaryOwners[0])\n\t\trequire.Len(t, route.ReplicaOwners, 0)\n\t}\n}\n"
  },
  {
    "path": "cmd/olric-server/main.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// Server implementation for Olric. Olric Server basically manages configuration for you.\n\npackage main\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"runtime\"\n\n\t\"github.com/olric-data/olric\"\n\t\"github.com/olric-data/olric/cmd/olric-server/server\"\n\t\"github.com/olric-data/olric/config\"\n\t\"github.com/sean-/seed\"\n)\n\nfunc usage() {\n\tvar msg = `Usage: olric-server [options] ...\n\nDistributed key-value store and cache\n\nOptions:\n  -h, --help    Print this message and exit.\n  -v, --version Print the version number and exit.\n  -c, --config  Sets configuration file path. Default is olric-server-local.yaml in the\n                current folder. Set OLRIC_SERVER_CONFIG to overwrite it.\n\nThe Go runtime version %s\nReport bugs to https://github.com/olric-data/olric/issues\n`\n\t_, err := fmt.Fprintf(os.Stdout, msg, runtime.Version())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\ntype arguments struct {\n\tconfig  string\n\thelp    bool\n\tversion bool\n}\n\nconst (\n\t// DefaultConfigFile is the default configuration file path on a Unix-based operating system.\n\tDefaultConfigFile = \"olric-server-local.yaml\"\n\n\t// EnvConfigFile is the name of environment variable which can be used to override default configuration file path.\n\tEnvConfigFile = \"OLRIC_SERVER_CONFIG\"\n)\n\nfunc main() {\n\targs := &arguments{}\n\n\t// Parse command line parameters\n\tf := flag.NewFlagSet(os.Args[0], flag.ContinueOnError)\n\tf.SetOutput(ioutil.Discard)\n\tf.BoolVar(&args.help, \"h\", false, \"\")\n\tf.BoolVar(&args.help, \"help\", false, \"\")\n\n\tf.BoolVar(&args.version, \"version\", false, \"\")\n\tf.BoolVar(&args.version, \"v\", false, \"\")\n\n\tf.StringVar(&args.config, \"config\", DefaultConfigFile, \"\")\n\tf.StringVar(&args.config, \"c\", DefaultConfigFile, \"\")\n\n\tif err := f.Parse(os.Args[1:]); err != nil {\n\t\t_, _ = fmt.Fprintf(os.Stderr, fmt.Sprintf(\"parsing error: %v\\n\", err))\n\t\tusage()\n\t\tos.Exit(1)\n\t}\n\n\tif args.version {\n\t\t_, _ = fmt.Fprintf(os.Stderr, \"olric-server version %s %s %s/%s\\n\",\n\t\t\tolric.ReleaseVersion,\n\t\t\truntime.Version(),\n\t\t\truntime.GOOS,\n\t\t\truntime.GOARCH,\n\t\t)\n\t\treturn\n\t} else if args.help {\n\t\tusage()\n\t\treturn\n\t}\n\n\t// MustInit provides guaranteed secure seeding.  If `/dev/urandom` is not\n\t// available, MustInit will panic() with an error indicating why reading from\n\t// `/dev/urandom` failed.  MustInit() will upgrade the seed if for some reason a\n\t// call to Init() failed in the past.\n\tseed.MustInit()\n\n\tenvPath := os.Getenv(EnvConfigFile)\n\tif envPath != \"\" {\n\t\targs.config = envPath\n\t}\n\n\tc, err := config.Load(args.config)\n\tif err != nil {\n\t\t_, _ = fmt.Fprintf(os.Stderr, \"failed to load the configuration file: %s: %v\\n\", args.config, err)\n\t\tos.Exit(1)\n\t}\n\n\ts, err := server.New(c)\n\tif err != nil {\n\t\tc.Logger.Fatalf(\"[ERROR] Failed to create a new Olric instance: %v\", err)\n\t}\n\n\tif err = s.Start(); err != nil {\n\t\tc.Logger.Printf(\"[ERROR] Failed to start Olric: %v\", err)\n\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\tdefer cancel()\n\t\tif err := s.Shutdown(ctx); err != nil {\n\t\t\tc.Logger.Printf(\"[ERROR] Failed to shutdown Olric: %v\", err)\n\t\t}\n\t\tc.Logger.Fatal(\"[ERROR] Quit unexpectedly!\")\n\t}\n\n\tc.Logger.Print(\"[INFO] Quit!\")\n}\n"
  },
  {
    "path": "cmd/olric-server/olric-server-local.yaml",
    "content": "#\n# IMPORTANT NOTE: This configuration file is intended for testing and local development.\n#\nserver:\n  # BindAddr denotes the address that Olric will bind to for communication\n  # with other Olric nodes.\n  bindAddr: localhost\n\n  # BindPort denotes the address that Olric will bind to for communication\n  # with other Olric nodes.\n  bindPort: 3320\n\n  # KeepAlivePeriod denotes whether the operating system should send\n  # keep-alive messages on the connection.\n  keepAlivePeriod: 300s\n\n  # IdleClose will automatically close idle connections after the specified duration.\n  # Use zero to disable this feature.\n  # idleClose: 300s\n\n  # Timeout for bootstrap control\n  #\n  # An Olric node checks operation status before taking any action for the\n  # cluster events, responding incoming requests and running API functions.\n  # Bootstrapping status is one of the most important checkpoints for an\n  # \"operable\" Olric node. BootstrapTimeout sets a deadline to check\n  # bootstrapping status without blocking indefinitely.\n  bootstrapTimeout: 5s\n\n  # PartitionCount is 271, by default.\n  partitionCount: 271\n\n  # ReplicaCount is 1, by default.\n  replicaCount: 1\n\n  # Minimum number of successful writes to return a response for a write request.\n  writeQuorum: 1\n\n  # Minimum number of successful reads to return a response for a read request.\n  readQuorum: 1\n\n  # Switch to control read-repair algorithm which helps to reduce entropy.\n  readRepair: false\n\n  # Default value is SyncReplicationMode.\n  replicationMode: 0 # sync mode. for async, set 1\n\n  # Minimum number of members to form a cluster and run any query on the cluster.\n  memberCountQuorum: 1\n\n  # Coordinator member pushes the routing table to cluster members in the case of\n  # node join or left events. It also pushes the table periodically. routingTablePushInterval\n  # is the interval between subsequent calls. Default is 1 minute.\n  routingTablePushInterval: 1m\n\n  # Olric can send push cluster events to cluster.events channel. Available cluster events:\n  #\n  # * node-join-event\n  # * node-left-event\n  # * fragment-migration-event\n  # * fragment-received-event\n  #\n  # If you want to receive these events, set true to EnableClusterEventsChannel and subscribe to\n  # cluster.events channel. Default is false.\n  enableClusterEventsChannel: true\n\n#authentication:\n  #password: \"your-password\"\n  \nclient:\n  # Timeout for TCP dial.\n  #\n  # The timeout includes name resolution, if required. When using TCP, and the host in the address parameter\n  # resolves to multiple IP addresses, the timeout is spread over each consecutive dial, such that each is\n  # given an appropriate fraction of the time to connect.\n  dialTimeout: 5s\n\n  # Timeout for socket reads. If reached, commands will fail\n  # with a timeout instead of blocking. Use value -1 for no timeout and 0 for default.\n  # Default is DefaultReadTimeout\n  readTimeout: 3s\n\n  # Timeout for socket writes. If reached, commands will fail\n  # with a timeout instead of blocking.\n  # Default is DefaultWriteTimeout\n  writeTimeout: 3s\n\n  # Maximum number of retries before giving up.\n  # Default is 3 retries; -1 (not 0) disables retries.\n  #maxRetries: 3\n\n  # Minimum backoff between each retry.\n  # Default is 8 milliseconds; -1 disables backoff.\n  #minRetryBackoff: 8ms\n\n  # Maximum backoff between each retry.\n  # Default is 512 milliseconds; -1 disables backoff.\n  #maxRetryBackoff: 512ms\n\n  # Type of connection pool.\n  # true for FIFO pool, false for LIFO pool.\n  # Note that fifo has higher overhead compared to lifo.\n  #poolFIFO: false\n\n  # Maximum number of socket connections.\n  # Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS.\n  #poolSize: 0\n\n  # Minimum number of idle connections which is useful when establishing\n  # new connection is slow.\n  #minIdleConns:\n\n  # Connection age at which client retires (closes) the connection.\n  # Default is to not close aged connections.\n  #maxConnAge:\n\n  # Amount of time client waits for connection if all connections are busy before\n  # returning an error. Default is ReadTimeout + 1 second.\n  #poolTimeout: 3s\n\n  # Amount of time after which client closes idle connections.\n  # Should be less than server's timeout.\n  # Default is 5 minutes. -1 disables idle timeout check.\n  #idleTimeout: 5m\n\n  # Frequency of idle checks made by idle connections reaper.\n  # Default is 1 minute. -1 disables idle connections reaper,\n  # but idle connections are still discarded by the client\n  # if IdleTimeout is set.\n  #idleCheckFrequency: 1m\n\n\nlogging:\n  # DefaultLogVerbosity denotes default log verbosity level.\n  #\n  # * 1 - Generally useful for this to ALWAYS be visible to an operator\n  #   * Programmer errors\n  #   * Logging extra info about a panic\n  #   * CLI argument handling\n  # * 2 - A reasonable default log level if you don't want verbosity.\n  #   * Information about config (listening on X, watching Y)\n  #   * Errors that repeat frequently that relate to conditions that can be\n  #     corrected\n  # * 3 - Useful steady state information about the service and\n  #     important log messages that may correlate to\n  #   significant changes in the system.  This is the recommended default log\n  #     level for most systems.\n  #   * Logging HTTP requests and their exit code\n  #   * System state changing\n  #   * Controller state change events\n  #   * Scheduler log messages\n  # * 4 - Extended information about changes\n  #   * More info about system state changes\n  # * 5 - Debug level verbosity\n  #   * Logging in particularly thorny parts of code where you may want to come\n  #     back later and check it\n  # * 6 - Trace level verbosity\n  #   * Context to understand the steps leading up to neterrors and warnings\n  #   * More information for troubleshooting reported issues\n  verbosity: 3\n\n  # Default LogLevel is DEBUG. Available levels: \"DEBUG\", \"WARN\", \"ERROR\", \"INFO\"\n  level: WARN\n  output: stderr\n\nmemberlist:\n  environment: local\n\n  # Configuration related to what address to bind to and ports to\n  # listen on. The port is used for both UDP and TCP gossip. It is\n  # assumed other nodes are running on this port, but they do not need\n  # to.\n  bindAddr: localhost\n  bindPort: 3322\n\n  # EnableCompression is used to control message compression. This can\n  # be used to reduce bandwidth usage at the cost of slightly more CPU\n  # utilization. This is only available starting at protocol version 1.\n  enableCompression: false\n\n  # JoinRetryInterval is the time gap between attempts to join an existing\n  # cluster.\n  joinRetryInterval: 1ms\n\n  # MaxJoinAttempts denotes the maximum number of attempts to join an existing\n  # cluster before forming a new one.\n  maxJoinAttempts: 1\n\n  # See service discovery plugins\n  #peers:\n  #  - \"localhost:3325\"\n\n  #advertiseAddr: \"\"\n  #advertisePort: 3322\n  #suspicionMaxTimeoutMult: 6\n  #disableTCPPings: false\n  #awarenessMaxMultiplier: 8\n  #gossipNodes: 3\n  #gossipVerifyIncoming: true\n  #gossipVerifyOutgoing: true\n  #dnsConfigPath: \"/etc/resolv.conf\"\n  #handoffQueueDepth: 1024\n  #udpBufferSize: 1400\n\ndmaps:\n  engine:\n    name: ramblock\n    config:\n      tableSize: 524288 # bytes\n#  checkEmptyFragmentsInterval: 1m\n#  triggerCompactionInterval: 10m\n#  numEvictionWorkers: 1\n#  maxIdleDuration: \"\"\n#  ttlDuration: \"100s\"\n#  maxKeys: 100000\n#  maxInuse: 1000000\n#  lRUSamples: 10\n#  evictionPolicy: \"LRU\"\n#  custom:\n#   foobar:\n#      maxIdleDuration: \"60s\"\n#      ttlDuration: \"300s\"\n#      maxKeys: 500000\n#      lRUSamples: 20\n#      evictionPolicy: \"NONE\"\n\n\n#serviceDiscovery:\n#  # path is a required property and used by Olric. It has to be a full path.\n#  path: \"/home/burak/go/src/github.com/olric-data/olric-consul-plugin/consul.so\"\n#\n#  # provider is just informal,\n#  provider: \"consul\"\n#\n#  # Plugin specific configuration\n#  # Consul server, used by the plugin. It's required\n#  address: \"http://127.0.0.1:8500\"\n#\n#  # Specifies that the server should return only nodes with all checks in the passing state.\n#  passingOnly: true\n#\n#  # Missing health checks from the request will be deleted from the agent. Using this parameter\n#  # allows to idempotently register a service and its checks without having to manually deregister\n#  # checks.\n#  replaceExistingChecks: true\n#\n#  # InsecureSkipVerify controls whether a client verifies the\n#  # server's certificate chain and host name.\n#  # If InsecureSkipVerify is true, TLS accepts any certificate\n#  # presented by the server and any host name in that certificate.\n#  # In this mode, TLS is susceptible to man-in-the-middle attacks.\n#  # This should be used only for testing.\n#  insecureSkipVerify: true\n#\n#  # service record\n#  payload: '\n#      {\n#          \"Name\": \"olric-cluster\",\n#          \"ID\": \"olric-node-1\",\n#          \"Tags\": [\n#            \"primary\",\n#            \"v1\"\n#          ],\n#          \"Address\": \"localhost\",\n#          \"Port\": 3322,\n#          \"EnableTagOverride\": false,\n#          \"check\": {\n#            \"name\": \"Olric node on 3322\",\n#            \"tcp\": \"0.0.0.0:3322\",\n#            \"interval\": \"10s\",\n#            \"timeout\": \"1s\"\n#          }\n#      }\n#'\n#\n#\n#serviceDiscovery:\n#  provider: \"k8s\"\n#  path: \"/Users/buraksezer/go/src/github.com/olric-data/olric-cloud-plugin/olric-cloud-plugin.so\"\n#  args: 'label_selector=\"app = olric-server\"'\n"
  },
  {
    "path": "cmd/olric-server/server/server.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n/*Package server provides a standalone server implementation for Olric*/\npackage server\n\nimport (\n\t\"context\"\n\t\"log\"\n\t\"os\"\n\t\"os/signal\"\n\t\"syscall\"\n\n\t\"github.com/olric-data/olric\"\n\t\"github.com/olric-data/olric/config\"\n\t\"golang.org/x/sync/errgroup\"\n)\n\n// OlricServer represents an instance of the Olric distributed in-memory data structure store.\n// It encapsulates logging, configuration, the Olric database instance, and an error group for\n// concurrency management.\ntype OlricServer struct {\n\tlog    *log.Logger\n\tconfig *config.Config\n\tdb     *olric.Olric\n\terrGr  errgroup.Group\n}\n\n// New initializes a new OlricServer instance using the provided configuration and returns it or an error.\nfunc New(c *config.Config) (*OlricServer, error) {\n\tdb, err := olric.New(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &OlricServer{\n\t\tconfig: c,\n\t\tlog:    c.Logger,\n\t\tdb:     db,\n\t}, nil\n}\n\n// waitForInterrupt waits for termination signals (SIGTERM, SIGINT) to gracefully shut down the Olric server instance.\nfunc (s *OlricServer) waitForInterrupt() {\n\tshutDownChan := make(chan os.Signal, 1)\n\tsignal.Notify(shutDownChan, syscall.SIGTERM, syscall.SIGINT)\n\tch := <-shutDownChan\n\ts.log.Printf(\"[INFO] Signal catched: %s\", ch.String())\n\n\t// Awaits for shutdown\n\ts.errGr.Go(func() error {\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\tdefer cancel()\n\n\t\tif err := s.db.Shutdown(ctx); err != nil {\n\t\t\ts.log.Printf(\"[ERROR] Failed to shutdown Olric: %v\", err)\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\n\t// This is not a goroutine leak. The process will quit.\n\tgo func() {\n\t\ts.log.Printf(\"[INFO] Awaiting for background tasks\")\n\t\ts.log.Printf(\"[INFO] Press CTRL+C or send SIGTERM/SIGINT to quit immediately\")\n\n\t\tforceQuitCh := make(chan os.Signal, 1)\n\t\tsignal.Notify(forceQuitCh, syscall.SIGTERM, syscall.SIGINT)\n\t\tch := <-forceQuitCh\n\n\t\ts.log.Printf(\"[INFO] Signal caught: %s\", ch.String())\n\t\ts.log.Printf(\"[INFO] Quits with exit code 1\")\n\t\tos.Exit(1)\n\t}()\n}\n\n// Start launches the Olric server instance and begins listening for incoming requests and termination signals.\nfunc (s *OlricServer) Start() error {\n\ts.log.Printf(\"[INFO] pid: %d has been started\", os.Getpid())\n\t// Wait for SIGTERM or SIGINT\n\tgo s.waitForInterrupt()\n\n\ts.errGr.Go(func() error {\n\t\treturn s.db.Start()\n\t})\n\n\treturn s.errGr.Wait()\n}\n\n// Shutdown gracefully stops the Olric server instance, releasing resources and ensuring a clean termination.\nfunc (s *OlricServer) Shutdown(ctx context.Context) error {\n\treturn s.db.Shutdown(ctx)\n}\n"
  },
  {
    "path": "config/authentication.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport \"strings\"\n\ntype Authentication struct {\n\tPassword string\n}\n\n// Sanitize ensures the Authentication configuration is pre-processed and prepared for use, with no changes currently applied.\nfunc (a *Authentication) Sanitize() error {\n\ta.Password = strings.TrimSpace(a.Password)\n\treturn nil\n}\n\n// Validate checks the current Authentication configuration for validity and returns an error if issues are found.\nfunc (a *Authentication) Validate() error {\n\t// Nothing to do\n\treturn nil\n}\n\n// Enabled checks if authentication is enabled by verifying if the password is set and returns true if it is configured.\nfunc (a *Authentication) Enabled() bool {\n\treturn len(a.Password) > 0\n}\n\n// Interface guard\nvar _ IConfig = (*Authentication)(nil)\n"
  },
  {
    "path": "config/client.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"fmt\"\n\t\"net\"\n\t\"runtime\"\n\t\"time\"\n\n\t\"github.com/redis/go-redis/v9\"\n)\n\nconst (\n\tDefaultDialTimeout     = 5 * time.Second\n\tDefaultKeepalive       = 5 * time.Minute\n\tDefaultReadTimeout     = 3 * time.Second\n\tDefaultIdleTimeout     = 5 * time.Minute\n\tDefaultMinRetryBackoff = 8 * time.Millisecond\n\tDefaultMaxRetryBackoff = 512 * time.Millisecond\n\tDefaultMaxRetries      = 3\n)\n\n// Client denotes configuration for TCP clients in Olric and the official Golang client.\ntype Client struct {\n\tAuthentication *Authentication\n\n\t// Dial timeout for establishing new connections.\n\t// Default is 5 seconds.\n\tDialTimeout time.Duration\n\n\t// Timeout for socket reads. If reached, commands will fail\n\t// with a timeout instead of blocking. Use value -1 for no timeout and 0 for default.\n\t// Default is 3 seconds.\n\tReadTimeout time.Duration\n\n\t// Timeout for socket writes. If reached, commands will fail\n\t// with a timeout instead of blocking.\n\t// Default is ReadTimeout.\n\tWriteTimeout time.Duration\n\n\t// Dialer creates new network connection and has priority over\n\t// Network and Addr options.\n\tDialer func(ctx context.Context, network, addr string) (net.Conn, error)\n\n\t// Hook that is called when new connection is established.\n\tOnConnect func(ctx context.Context, cn *redis.Conn) error\n\n\t// Maximum number of retries before giving up.\n\t// Default is 3 retries; -1 (not 0) disables retries.\n\tMaxRetries int\n\n\t// Minimum backoff between each retry.\n\t// Default is 8 milliseconds; -1 disables backoff.\n\tMinRetryBackoff time.Duration\n\n\t// Maximum backoff between each retry.\n\t// Default is 512 milliseconds; -1 disables backoff.\n\tMaxRetryBackoff time.Duration\n\n\t// Type of connection pool.\n\t// true for FIFO pool, false for LIFO pool.\n\t// Note that fifo has higher overhead compared to lifo.\n\tPoolFIFO bool\n\n\t// Maximum number of socket connections.\n\t// Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS.\n\tPoolSize int\n\n\t// Minimum number of idle connections which is useful when establishing\n\t// new connection is slow.\n\tMinIdleConns int\n\n\t// Connection age at which client retires (closes) the connection.\n\t// Default is to not close aged connections.\n\tMaxConnAge time.Duration\n\n\t// Amount of time client waits for connection if all connections\n\t// are busy before returning an error.\n\t// Default is ReadTimeout + 1 second.\n\tPoolTimeout time.Duration\n\n\t// Amount of time after which client closes idle connections.\n\t// Should be less than server's timeout.\n\t// Default is 5 minutes. -1 disables idle timeout check.\n\tIdleTimeout time.Duration\n\n\t// TLS Config to use. When set TLS will be negotiated.\n\tTLSConfig *tls.Config\n\n\t// Limiter interface used to implemented circuit breaker or rate limiter.\n\tLimiter redis.Limiter\n}\n\n// NewClient returns a new configuration object for clients.\nfunc NewClient() *Client {\n\tc := &Client{\n\t\tAuthentication: &Authentication{},\n\t}\n\terr := c.Sanitize()\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to create a new client configuration: %v\", err))\n\t}\n\treturn c\n}\n\n// Sanitize sets default values to empty configuration variables, if it's possible.\nfunc (c *Client) Sanitize() error {\n\tif err := c.Authentication.Sanitize(); err != nil {\n\t\treturn fmt.Errorf(\"failed to sanitize authentication configuration: %w\", err)\n\t}\n\n\tif c.DialTimeout == 0 {\n\t\tc.DialTimeout = DefaultDialTimeout\n\t}\n\tif c.Dialer == nil {\n\t\tc.Dialer = func(ctx context.Context, network, addr string) (net.Conn, error) {\n\t\t\tnetDialer := &net.Dialer{\n\t\t\t\tTimeout:   c.DialTimeout,\n\t\t\t\tKeepAlive: DefaultKeepalive,\n\t\t\t}\n\t\t\tif c.TLSConfig == nil {\n\t\t\t\treturn netDialer.DialContext(ctx, network, addr)\n\t\t\t}\n\t\t\treturn tls.DialWithDialer(netDialer, network, addr, c.TLSConfig)\n\t\t}\n\t}\n\tif c.PoolSize == 0 {\n\t\tc.PoolSize = 10 * runtime.GOMAXPROCS(0)\n\t}\n\tswitch c.ReadTimeout {\n\tcase -1:\n\t\tc.ReadTimeout = 0\n\tcase 0:\n\t\tc.ReadTimeout = DefaultReadTimeout\n\t}\n\tswitch c.WriteTimeout {\n\tcase -1:\n\t\tc.WriteTimeout = 0\n\tcase 0:\n\t\tc.WriteTimeout = c.ReadTimeout\n\t}\n\tif c.PoolTimeout == 0 {\n\t\tc.PoolTimeout = c.ReadTimeout + time.Second\n\t}\n\tif c.IdleTimeout == 0 {\n\t\tc.IdleTimeout = DefaultIdleTimeout\n\t}\n\n\tif c.MaxRetries == -1 {\n\t\tc.MaxRetries = 0\n\t} else if c.MaxRetries == 0 {\n\t\tc.MaxRetries = DefaultMaxRetries\n\t}\n\tswitch c.MinRetryBackoff {\n\tcase -1:\n\t\tc.MinRetryBackoff = 0\n\tcase 0:\n\t\tc.MinRetryBackoff = DefaultMinRetryBackoff\n\t}\n\tswitch c.MaxRetryBackoff {\n\tcase -1:\n\t\tc.MaxRetryBackoff = 0\n\tcase 0:\n\t\tc.MaxRetryBackoff = DefaultMaxRetryBackoff\n\t}\n\n\treturn nil\n}\n\n// Validate finds errors in the current configuration.\nfunc (c *Client) Validate() error {\n\tif err := c.Authentication.Validate(); err != nil {\n\t\treturn fmt.Errorf(\"failed to validate authentication configuration: %w\", err)\n\t}\n\treturn nil\n}\n\nfunc (c *Client) RedisOptions() *redis.Options {\n\t// Note: IdleCheckFrequency is gone since go-redis no longer checks idle connections.\n\t// See https://github.com/redis/go-redis/discussions/2635\n\toptions := &redis.Options{\n\t\tNetwork:         \"tcp\",\n\t\tDialer:          c.Dialer,\n\t\tOnConnect:       c.OnConnect,\n\t\tMaxRetries:      c.MaxRetries,\n\t\tMinRetryBackoff: c.MinRetryBackoff,\n\t\tMaxRetryBackoff: c.MaxRetryBackoff,\n\t\tDialTimeout:     c.DialTimeout,\n\t\tReadTimeout:     c.ReadTimeout,\n\t\tWriteTimeout:    c.WriteTimeout,\n\t\tPoolFIFO:        c.PoolFIFO,\n\t\tPoolSize:        c.PoolSize,\n\t\tMinIdleConns:    c.MinIdleConns,\n\t\tConnMaxLifetime: c.MaxConnAge,\n\t\tPoolTimeout:     c.PoolTimeout,\n\t\tConnMaxIdleTime: c.IdleTimeout,\n\t\tTLSConfig:       c.TLSConfig,\n\t\tLimiter:         c.Limiter,\n\t}\n\tif c.Authentication.Enabled() {\n\t\toptions.Password = c.Authentication.Password\n\t}\n\treturn options\n}\n\n// Interface guard\nvar _ IConfig = (*Client)(nil)\n"
  },
  {
    "path": "config/config.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com/hashicorp/memberlist\"\n\t\"github.com/olric-data/olric/hasher\"\n)\n\n// IConfig is an interface that has to be implemented by Config and its nested\n// structs. It provides a clear and granular way to sanitize and validate\n// the configuration.\ntype IConfig interface {\n\t// Sanitize methods should be used to set defaults.\n\tSanitize() error\n\n\t// Validate method should be used to find configuration errors.\n\tValidate() error\n}\n\nconst (\n\t// SyncReplicationMode enables sync replication mode which means that the\n\t// caller is blocked until write/delete operation is applied by replica\n\t// owners. 
The default mode is SyncReplicationMode\n\tSyncReplicationMode = 0\n\n\t// AsyncReplicationMode enables async replication mode which means that\n\t// write/delete operations are done in a background task.\n\tAsyncReplicationMode = 1\n)\n\nconst (\n\tLogLevelDebug = \"DEBUG\"\n\tLogLevelWarn  = \"WARN\"\n\tLogLevelError = \"ERROR\"\n\tLogLevelInfo  = \"INFO\"\n)\n\nconst (\n\t// DefaultPort is for Olric\n\tDefaultPort = 3320\n\n\t// DefaultDiscoveryPort is for memberlist\n\tDefaultDiscoveryPort = 3322\n\n\t// DefaultPartitionCount denotes default partition count in the cluster.\n\tDefaultPartitionCount = 271\n\n\t// DefaultLoadFactor is used by the consistent hashing function. Keep it small.\n\tDefaultLoadFactor = 1.25\n\n\t// DefaultLogLevel determines the log level without extra configuration.\n\t// It's DEBUG.\n\tDefaultLogLevel = LogLevelDebug\n\n\t// DefaultLogVerbosity denotes default log verbosity level.\n\t//\n\t// * flog.V(1) - Generally useful for this to ALWAYS be visible to an operator\n\t//   * Programmer errors\n\t//   * Logging extra info about a panic\n\t//   * CLI argument handling\n\t// * flog.V(2) - A reasonable default log level if you don't want verbosity.\n\t//   * Information about config (listening on X, watching Y)\n\t//   * Errors that repeat frequently that relate to conditions that can be\n\t//     corrected (pod detected as unhealthy)\n\t// * flog.V(3) - Useful steady state information about the service and\n\t//     important log messages that may correlate to\n\t//   significant changes in the system.  
This is the recommended default log\n\t//     level for most systems.\n\t//   * Logging HTTP requests and their exit code\n\t//   * System state changing (killing pod)\n\t//   * Controller state change events (starting pods)\n\t//   * Scheduler log messages\n\t// * flog.V(4) - Extended information about changes\n\t//   * More info about system state changes\n\t// * flog.V(5) - Debug level verbosity\n\t//   * Logging in particularly thorny parts of code where you may want to come\n\t//     back later and check it\n\t// * flog.V(6) - Trace level verbosity\n\t//   * Context to understand the steps leading up to neterrors and warnings\n\t//   * More information for troubleshooting reported issues\n\tDefaultLogVerbosity = 3\n\n\t// MinimumReplicaCount denotes default and minimum replica count in an Olric\n\t// cluster.\n\tMinimumReplicaCount = 1\n\n\t// DefaultBootstrapTimeout denotes default timeout value to check bootstrapping\n\t// status.\n\tDefaultBootstrapTimeout = 10 * time.Second\n\n\t// DefaultJoinRetryInterval denotes a time gap between sequential join attempts.\n\tDefaultJoinRetryInterval = time.Second\n\n\t// DefaultMaxJoinAttempts denotes a maximum number of failed join attempts\n\t// before forming a standalone cluster.\n\tDefaultMaxJoinAttempts = 10\n\n\t// MinimumMemberCountQuorum denotes minimum required count of members to form\n\t// a cluster.\n\tMinimumMemberCountQuorum = 1\n\n\t// DefaultLRUSamples is a sane default for randomly selected keys\n\t// in approximate LRU implementation. 
It's 5.\n\tDefaultLRUSamples int = 5\n\n\t// LRUEviction assigns this as EvictionPolicy in order to enable LRU eviction\n\t// algorithm.\n\tLRUEviction EvictionPolicy = \"LRU\"\n\n\t// DefaultStorageEngine denotes the storage engine implementation provided by\n\t// Olric project.\n\tDefaultStorageEngine = \"ramblock\"\n\n\t// DefaultRoutingTablePushInterval is interval between routing table push events.\n\tDefaultRoutingTablePushInterval = time.Minute\n\n\t// DefaultTriggerBalancerInterval is interval between two sequential call of balancer worker.\n\tDefaultTriggerBalancerInterval = 15 * time.Second\n\n\t// DefaultCheckEmptyFragmentsInterval is the default value of interval between\n\t// two sequential call of empty fragment cleaner. It's one minute by default.\n\tDefaultCheckEmptyFragmentsInterval = time.Minute\n\n\t// DefaultTriggerCompactionInterval is the default value of interval between\n\t// two sequential call of compaction workers. The compaction worker works until\n\t// its work is done. It's 10 minutes by default.\n\tDefaultTriggerCompactionInterval = 10 * time.Minute\n\n\t// DefaultLeaveTimeout is the default value of maximum amount of time before\n\tDefaultLeaveTimeout = 5 * time.Second\n\n\tDefaultReadQuorum        = 1\n\tDefaultWriteQuorum       = 1\n\tDefaultMemberCountQuorum = 1\n\n\t// DefaultKeepAlivePeriod is the default value of TCP keepalive. It's 300 seconds.\n\t// This option is useful in order to detect dead peers (clients that cannot\n\t// be reached even if they look connected). 
Moreover, if there is network\n\t// equipment between clients and servers that need to see some traffic in\n\t// order to take the connection open, the option will prevent unexpected\n\t// connection closed events.\n\tDefaultKeepAlivePeriod = 300 * time.Second\n)\n\n// Config represents the configuration structure for customizing the behavior and properties of Olric.\ntype Config struct {\n\t// Authentication defines authentication settings, including password protection, for securing access.\n\tAuthentication *Authentication\n\n\t// Interface denotes a binding interface. It can be used instead of BindAddr\n\t// if the interface is known but not the address. If both are provided, then\n\t// Olric verifies that the interface has the bind address that is provided.\n\tInterface string\n\n\t// LogVerbosity denotes the level of message verbosity. The default value\n\t// is 3. Valid values are between 1 to 6.\n\tLogVerbosity int32\n\n\t// Default LogLevel is DEBUG. Available levels: \"DEBUG\", \"WARN\", \"ERROR\", \"INFO\"\n\tLogLevel string\n\n\t// BindAddr denotes the address that Olric will bind to for communication\n\t// with other Olric nodes.\n\tBindAddr string\n\n\t// BindPort denotes the address that Olric will bind to for communication\n\t// with other Olric nodes.\n\tBindPort int\n\n\t// Client denotes configuration for TCP clients in Olric and the official\n\t// Golang client.\n\tClient *Client\n\n\t// KeepAlivePeriod denotes whether the operating system should send\n\t// keep-alive messages on the connection.\n\tKeepAlivePeriod time.Duration\n\n\t// IdleClose will automatically close idle connections after the specified duration.\n\t// Use zero to disable this feature.\n\tIdleClose time.Duration\n\n\t// Timeout for bootstrap control\n\t//\n\t// An Olric node checks operation status before taking any action for the\n\t// cluster events, responding incoming requests and running API functions.\n\t// Bootstrapping status is one of the most important checkpoints 
for an\n\t// \"operable\" Olric node. BootstrapTimeout sets a deadline to check\n\t// bootstrapping status without blocking indefinitely.\n\tBootstrapTimeout time.Duration\n\n\t// Coordinator member pushes the routing table to cluster members in the case of\n\t// node join or left events. It also pushes the table periodically. RoutingTablePushInterval\n\t// is the interval between subsequent calls. Default is 1 minute.\n\tRoutingTablePushInterval time.Duration\n\n\t// TriggerBalancerInterval is interval between two sequential call of balancer worker.\n\tTriggerBalancerInterval time.Duration\n\n\t// The list of host:port which are used by memberlist for discovery.\n\t// Don't confuse it with Name.\n\tPeers []string\n\n\t// PartitionCount is 271, by default.\n\tPartitionCount uint64\n\n\t// ReplicaCount is 1, by default.\n\tReplicaCount int\n\n\t// Minimum number of successful reads to return a response for a read request.\n\tReadQuorum int\n\n\t// Minimum number of successful writes to return a response for a write request.\n\tWriteQuorum int\n\n\t// Minimum number of members to form a cluster and run any query on the cluster.\n\tMemberCountQuorum int32\n\n\t// Switch to control read-repair algorithm which helps to reduce entropy.\n\tReadRepair bool\n\n\t// Default value is SyncReplicationMode.\n\tReplicationMode int\n\n\t// LoadFactor is used by consistent hashing function. It determines the maximum\n\t// load for a server in the cluster. Keep it small.\n\tLoadFactor float64\n\n\t// Olric can send push cluster events to cluster.events channel. Available cluster events:\n\t//\n\t// * node-join-event\n\t// * node-left-event\n\t// * fragment-migration-event\n\t// * fragment-received-event\n\t//\n\t// If you want to receive these events, set true to EnableClusterEventsChannel and subscribe to\n\t// cluster.events channel. 
Default is false.\n\tEnableClusterEventsChannel bool\n\n\t// Default hasher is github.com/cespare/xxhash/v2\n\tHasher hasher.Hasher\n\n\t// LogOutput is the writer where logs should be sent when no custom logger\n\t// is provided. If unset, stderr is used by default.\n\t// If Logger is set, LogOutput is ignored.\n\tLogOutput io.Writer\n\n\t// Logger is a user-provided custom logger. When this is set, Olric will use\n\t// it as-is and will not inspect or modify LogOutput.\n\tLogger *log.Logger\n\n\t// DMaps denotes a global configuration for DMaps. You can still overwrite it\n\t// by setting a DMap for a particular distributed map via DMaps.Custom field.\n\t// Most of the fields are related with distributed cache implementation.\n\tDMaps *DMaps\n\n\t// JoinRetryInterval is the time gap between attempts to join an existing\n\t// cluster.\n\tJoinRetryInterval time.Duration\n\n\t// MaxJoinAttempts denotes the maximum number of attempts to join an existing\n\t// cluster before forming a new one.\n\tMaxJoinAttempts int\n\n\t// Callback function. Olric calls this after\n\t// the server is ready to accept new connections.\n\tStarted func()\n\n\t// ServiceDiscovery is a map that contains plugins implement ServiceDiscovery\n\t// interface. See pkg/service_discovery/service_discovery.go for details.\n\tServiceDiscovery map[string]interface{}\n\n\t// Interface denotes a binding interface. 
It can be used instead of\n\t// memberlist.Loader.BindAddr if the interface is known but not the address.\n\t// If both are provided, then Olric verifies that the interface has the bind\n\t// address that is provided.\n\tMemberlistInterface string\n\n\t// Olric will broadcast a leave message but will not shut down the background\n\t// listeners, meaning the node will continue participating in gossip and state\n\t// updates.\n\t//\n\t// Sending a leave message will block until the leave message is successfully\n\t// broadcast to a member of the cluster, if any exist or until a specified timeout\n\t// is reached.\n\tLeaveTimeout time.Duration\n\n\t// MemberlistConfig is the memberlist configuration that Olric will\n\t// use to do the underlying membership management and gossip. Some\n\t// fields in the MemberlistConfig will be overwritten by Olric no\n\t// matter what:\n\t//\n\t//   * Name - This will always be set to the same as the NodeName\n\t//     in this configuration.\n\t//\n\t//   * ClusterEvents - Olric uses a custom event delegate.\n\t//\n\t//   * Delegate - Olric uses a custom delegate.\n\t//\n\t// You have to use NewMemberlistConfig to create a new one.\n\t// Then, you may need to modify it to tune for your environment.\n\tMemberlistConfig *memberlist.Config\n}\n\n// Validate finds errors in the current configuration.\nfunc (c *Config) Validate() error {\n\tif c.ReplicaCount < MinimumReplicaCount {\n\t\treturn fmt.Errorf(\"cannot specify ReplicaCount smaller than MinimumReplicaCount\")\n\t}\n\n\tif c.ReadQuorum <= 0 {\n\t\treturn fmt.Errorf(\"cannot specify ReadQuorum less than or equal to zero\")\n\t}\n\tif c.ReplicaCount < c.ReadQuorum {\n\t\treturn fmt.Errorf(\"cannot specify ReadQuorum greater than ReplicaCount\")\n\t}\n\n\tif c.WriteQuorum <= 0 {\n\t\treturn fmt.Errorf(\"cannot specify WriteQuorum less than or equal to zero\")\n\t}\n\tif c.ReplicaCount < c.WriteQuorum {\n\t\treturn fmt.Errorf(\"cannot specify WriteQuorum greater than 
ReplicaCount\")\n\t}\n\n\tif err := c.validateMemberlistConfig(); err != nil {\n\t\treturn err\n\t}\n\n\tif c.MemberCountQuorum < MinimumMemberCountQuorum {\n\t\treturn fmt.Errorf(\"cannot specify MemberCountQuorum smaller than MinimumMemberCountQuorum\")\n\t}\n\n\tif c.BindAddr == \"\" {\n\t\treturn fmt.Errorf(\"bindAddr cannot be empty\")\n\t}\n\n\tif c.BindPort == 0 {\n\t\treturn fmt.Errorf(\"bindPort cannot be empty or zero\")\n\t}\n\n\t// Check peers. If Peers slice contains node's itself, return an error.\n\tport := strconv.Itoa(c.MemberlistConfig.BindPort)\n\tthis := net.JoinHostPort(c.MemberlistConfig.BindAddr, port)\n\tfor _, peer := range c.Peers {\n\t\tif this == peer {\n\t\t\treturn fmt.Errorf(\"cannot be peer with itself\")\n\t\t}\n\t}\n\n\tif err := c.Client.Validate(); err != nil {\n\t\treturn fmt.Errorf(\"failed to validate client configuration: %w\", err)\n\t}\n\n\tif err := c.DMaps.Validate(); err != nil {\n\t\treturn fmt.Errorf(\"failed to validate DMap configuration: %w\", err)\n\t}\n\n\tif err := c.Authentication.Validate(); err != nil {\n\t\treturn fmt.Errorf(\"failed to sanitize authentication configuration: %w\", err)\n\t}\n\n\tswitch c.LogLevel {\n\tcase LogLevelDebug, LogLevelWarn, LogLevelInfo, LogLevelError:\n\tdefault:\n\t\treturn fmt.Errorf(\"invalid LogLevel: %s\", c.LogLevel)\n\t}\n\n\treturn nil\n}\n\n// Sanitize sets default values to empty configuration variables, if it's possible.\nfunc (c *Config) Sanitize() error {\n\tif c.LogOutput == nil {\n\t\tc.LogOutput = os.Stderr\n\t}\n\n\tif c.LogLevel == \"\" {\n\t\tc.LogLevel = DefaultLogLevel\n\t}\n\n\tif c.LogVerbosity <= 0 {\n\t\tc.LogVerbosity = DefaultLogVerbosity\n\t}\n\n\tif c.Logger == nil {\n\t\tc.Logger = log.New(c.LogOutput, \"\", log.LstdFlags)\n\t}\n\n\tif c.Hasher == nil {\n\t\tc.Hasher = hasher.NewDefaultHasher()\n\t}\n\n\tif c.BindAddr == \"\" {\n\t\tname, err := os.Hostname()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to read hostname from kernel: %w\", 
err)\n\t\t}\n\t\tc.BindAddr = name\n\t}\n\t// We currently don't support ephemeral port selection. Because it needs\n\t// improved flow control in server initialization stage.\n\tif c.BindPort == 0 {\n\t\tc.BindPort = DefaultPort\n\t}\n\n\tif c.LoadFactor == 0 {\n\t\tc.LoadFactor = DefaultLoadFactor\n\t}\n\tif c.PartitionCount == 0 {\n\t\tc.PartitionCount = DefaultPartitionCount\n\t}\n\tif c.ReplicaCount == 0 {\n\t\tc.ReplicaCount = MinimumReplicaCount\n\t}\n\n\tif c.ReadQuorum == 0 {\n\t\tc.ReadQuorum = DefaultReadQuorum\n\t}\n\tif c.WriteQuorum == 0 {\n\t\tc.WriteQuorum = DefaultWriteQuorum\n\t}\n\n\tif c.MemberCountQuorum == 0 {\n\t\tc.MemberCountQuorum = DefaultMemberCountQuorum\n\t}\n\n\tif c.MemberlistConfig == nil {\n\t\tm := memberlist.DefaultLocalConfig()\n\t\t// hostname is assigned to memberlist.BindAddr\n\t\t// memberlist.Name is assigned by olric.New\n\t\tm.BindPort = DefaultDiscoveryPort\n\t\tm.AdvertisePort = DefaultDiscoveryPort\n\t\tc.MemberlistConfig = m\n\t}\n\n\tif c.BootstrapTimeout == 0 {\n\t\tc.BootstrapTimeout = DefaultBootstrapTimeout\n\t}\n\tif c.JoinRetryInterval == 0 {\n\t\tc.JoinRetryInterval = DefaultJoinRetryInterval\n\t}\n\tif c.MaxJoinAttempts == 0 {\n\t\tc.MaxJoinAttempts = DefaultMaxJoinAttempts\n\t}\n\tif c.LeaveTimeout == 0 {\n\t\tc.LeaveTimeout = DefaultLeaveTimeout\n\t}\n\n\tif c.RoutingTablePushInterval == 0 {\n\t\tc.RoutingTablePushInterval = DefaultRoutingTablePushInterval\n\t}\n\n\tif c.TriggerBalancerInterval == 0 {\n\t\tc.TriggerBalancerInterval = DefaultTriggerBalancerInterval\n\t}\n\n\tif c.KeepAlivePeriod == 0 {\n\t\tc.KeepAlivePeriod = DefaultKeepAlivePeriod\n\t}\n\n\tif c.Client == nil {\n\t\tc.Client = NewClient()\n\t}\n\n\tif c.DMaps == nil {\n\t\tc.DMaps = &DMaps{}\n\t}\n\n\tif c.Authentication == nil {\n\t\tc.Authentication = &Authentication{}\n\t}\n\n\tif err := c.Authentication.Sanitize(); err != nil {\n\t\treturn fmt.Errorf(\"failed to sanitize authentication configuration: %w\", err)\n\t}\n\n\tif err := 
c.Client.Sanitize(); err != nil {\n\t\treturn fmt.Errorf(\"failed to sanitize TCP client configuration: %w\", err)\n\t}\n\n\tif err := c.DMaps.Sanitize(); err != nil {\n\t\treturn fmt.Errorf(\"failed to sanitize DMap configuration: %w\", err)\n\t}\n\n\treturn nil\n}\n\n// New returns a Config with sane defaults. If you change a configuration parameter,\n// please run Sanitize and Validate functions respectively.\n//\n// New takes an env parameter used by memberlist: local, lan and wan.\n//\n// local:\n//\n// DefaultLocalConfig works like DefaultConfig, however it returns a configuration\n// that is optimized for a local loopback environments. The default configuration\n// is still very conservative and errs on the side of caution.\n//\n// lan:\n//\n// DefaultLANConfig returns a sane set of configurations for Memberlist. It uses\n// the hostname as the node name, and otherwise sets very conservative values\n// that are sane for most LAN environments. The default configuration errs on\n// the side of caution, choosing values that are optimized for higher convergence\n// at the cost of higher bandwidth usage. Regardless, these values are a good\n// starting point when getting started with memberlist.\n//\n// wan:\n//\n// DefaultWANConfig works like DefaultConfig, however it returns a configuration\n// that is optimized for most WAN environments. 
The default configuration is still\n// very conservative and errs on the side of caution.\nfunc New(env string) *Config {\n\tc := &Config{\n\t\tBindAddr:          \"0.0.0.0\",\n\t\tBindPort:          DefaultPort,\n\t\tReadRepair:        false,\n\t\tReplicaCount:      1,\n\t\tWriteQuorum:       1,\n\t\tReadQuorum:        1,\n\t\tMemberCountQuorum: 1,\n\t\tPeers:             []string{},\n\t\tDMaps:             &DMaps{},\n\t\tAuthentication:    &Authentication{},\n\t}\n\n\tm, err := NewMemberlistConfig(env)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"unable to create a new memberlist config: %v\", err))\n\t}\n\t// memberlist.Name will be assigned by olric.New\n\tm.BindPort = DefaultDiscoveryPort\n\tm.AdvertisePort = DefaultDiscoveryPort\n\tc.MemberlistConfig = m\n\n\tif err := c.Sanitize(); err != nil {\n\t\tpanic(fmt.Sprintf(\"unable to sanitize Olric config: %v\", err))\n\t}\n\n\tif err := c.Validate(); err != nil {\n\t\tpanic(fmt.Sprintf(\"unable to validate Olric config: %v\", err))\n\t}\n\treturn c\n}\n\n// Interface guard\nvar _ IConfig = (*Config)(nil)\n"
  },
  {
    "path": "config/config_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"bytes\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nvar testConfig = `server:\n  bindAddr: \"0.0.0.0\"\n  bindPort: 3320\n  serializer: \"msgpack\"\n  keepAlivePeriod: \"300s\"\n  idleClose: 300s\n  bootstrapTimeout: \"5s\"\n  partitionCount:  271\n  replicaCount: 2\n  writeQuorum: 1\n  readQuorum: 1\n  readRepair: false\n  replicationMode: 0 # sync mode. 
for async, set 1\n  memberCountQuorum: 1\n  enableClusterEventsChannel: true\n\n\nauthentication:\n  password: \"secret\"\n\nclient:\n  dialTimeout: 8s\n  readTimeout: 2s\n  writeTimeout: 2s\n  maxRetries: 5\n  minRetryBackoff: 10ms\n  maxRetryBackoff: 520ms\n  poolFIFO: true\n  poolSize: 10\n  minIdleConns: 5\n  maxConnAge: 2h\n  poolTimeout: 4s\n  idleTimeout: 6m\n\nlogging:\n  verbosity: 6\n  level: \"DEBUG\"\n  output: \"stderr\"\n\nmemberlist:\n  environment: \"local\"\n  bindAddr: \"0.0.0.0\"\n  bindPort: 3322\n  enableCompression: false\n  joinRetryInterval: \"1s\"\n  maxJoinAttempts: 10\n  peers:\n    - \"localhost:3325\"\n\n  advertiseAddr: \"\"\n  advertisePort: 3322\n  suspicionMaxTimeoutMult: 6\n  disableTCPPings: false\n  awarenessMaxMultiplier: 8\n  gossipNodes: 3\n  gossipVerifyIncoming: true\n  gossipVerifyOutgoing: true\n  dnsConfigPath: \"/etc/resolv.conf\"\n  handoffQueueDepth: 1024\n  udpBufferSize: 1400\n\ndmaps:\n  engine:\n    name: ramblock\n    config:\n      tableSize: 202134\n  numEvictionWorkers: 2\n  maxIdleDuration: 100s\n  ttlDuration: 200s\n  maxKeys: 300000\n  maxInuse: 2000000\n  lruSamples: 20\n  evictionPolicy: \"LRU\"\n  custom:\n    foobar:\n      maxIdleDuration: \"30s\"\n      ttlDuration: \"500s\"\n      maxKeys: 600000\n      lruSamples: 60\n      evictionPolicy: \"NONE\"\n\nserviceDiscovery:\n  path: \"/usr/lib/olric-consul-plugin.so\"\n  provider: \"consul\"\n  address: \"http://consul:8500\"\n  passingOnly: true\n  replaceExistingChecks: true\n  insecureSkipVerify: true\n  payload: 'SAMPLE-PAYLOAD'`\n\nfunc createTmpFile(t *testing.T, pattern string) *os.File {\n\tf, err := ioutil.TempFile(\"/tmp/\", pattern)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\tt.Cleanup(func() {\n\t\terr = f.Close()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t\t}\n\t\terr = os.Remove(f.Name())\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Expected nil. 
Got: %v\", err)\n\t\t}\n\t})\n\treturn f\n}\n\nfunc TestConfig(t *testing.T) {\n\tw := bytes.NewBuffer([]byte(testConfig))\n\tf := createTmpFile(t, \"olric-yaml-config-test\")\n\t_, err := f.Write(w.Bytes())\n\trequire.NoError(t, err)\n\n\tlc, err := Load(f.Name())\n\trequire.NoError(t, err)\n\n\tc := New(\"local\")\n\tc.BindAddr = \"0.0.0.0\"\n\tc.BindPort = 3320\n\tc.KeepAlivePeriod = 300 * time.Second\n\tc.IdleClose = 300 * time.Second\n\tc.BootstrapTimeout = 5 * time.Second\n\tc.PartitionCount = 271\n\tc.ReplicaCount = 2\n\tc.WriteQuorum = 1\n\tc.ReadQuorum = 1\n\tc.ReadRepair = false\n\tc.ReplicationMode = SyncReplicationMode\n\tc.MemberCountQuorum = 1\n\tc.EnableClusterEventsChannel = true\n\n\tc.DMaps.Engine = NewEngine()\n\n\tc.Client.DialTimeout = 8 * time.Second\n\tc.Client.ReadTimeout = 2 * time.Second\n\tc.Client.WriteTimeout = 2 * time.Second\n\tc.Client.MaxRetries = 5\n\tc.Client.MinRetryBackoff = 10 * time.Millisecond\n\tc.Client.MaxRetryBackoff = 520 * time.Millisecond\n\tc.Client.PoolFIFO = true\n\tc.Client.PoolSize = 10\n\tc.Client.MinIdleConns = 5\n\tc.Client.MaxConnAge = 2 * time.Hour\n\tc.Client.PoolTimeout = 4 * time.Second\n\tc.Client.IdleTimeout = 6 * time.Minute\n\n\tc.LogVerbosity = 6\n\tc.LogLevel = \"DEBUG\"\n\n\tc.MemberlistConfig.BindAddr = \"0.0.0.0\"\n\tc.MemberlistConfig.BindPort = 3322\n\tc.MemberlistConfig.EnableCompression = false\n\tc.JoinRetryInterval = time.Second\n\tc.MaxJoinAttempts = 10\n\tc.Peers = []string{\"localhost:3325\"}\n\tc.MemberlistConfig.AdvertisePort = 3322\n\tc.MemberlistConfig.SuspicionMaxTimeoutMult = 6\n\tc.MemberlistConfig.DisableTcpPings = false\n\tc.MemberlistConfig.AwarenessMaxMultiplier = 8\n\tc.MemberlistConfig.GossipNodes = 3\n\tc.MemberlistConfig.GossipVerifyIncoming = true\n\tc.MemberlistConfig.GossipVerifyOutgoing = true\n\tc.MemberlistConfig.DNSConfigPath = \"/etc/resolv.conf\"\n\tc.MemberlistConfig.HandoffQueueDepth = 1024\n\tc.MemberlistConfig.UDPBufferSize = 
1400\n\n\tc.DMaps.NumEvictionWorkers = 2\n\tc.DMaps.TTLDuration = 200 * time.Second\n\tc.DMaps.MaxIdleDuration = 100 * time.Second\n\tc.DMaps.MaxKeys = 300000\n\tc.DMaps.MaxInuse = 2000000\n\tc.DMaps.LRUSamples = 20\n\tc.DMaps.EvictionPolicy = LRUEviction\n\tc.DMaps.Engine.Name = DefaultStorageEngine\n\tc.DMaps.Engine.Config = map[string]interface{}{\"tableSize\": 202134}\n\n\tc.DMaps.Custom = map[string]DMap{\"foobar\": {\n\t\tMaxIdleDuration: 30 * time.Second,\n\t\tTTLDuration:     500 * time.Second,\n\t\tMaxKeys:         600000,\n\t\tLRUSamples:      60,\n\t\tEvictionPolicy:  \"NONE\",\n\t}}\n\n\tc.ServiceDiscovery = make(map[string]interface{})\n\tc.ServiceDiscovery[\"path\"] = \"/usr/lib/olric-consul-plugin.so\"\n\tc.ServiceDiscovery[\"provider\"] = \"consul\"\n\tc.ServiceDiscovery[\"address\"] = \"http://consul:8500\"\n\tc.ServiceDiscovery[\"passingOnly\"] = true\n\tc.ServiceDiscovery[\"replaceExistingChecks\"] = true\n\tc.ServiceDiscovery[\"insecureSkipVerify\"] = true\n\tc.ServiceDiscovery[\"payload\"] = \"SAMPLE-PAYLOAD\"\n\n\tc.Authentication = &Authentication{\n\t\tPassword: \"secret\",\n\t}\n\tc.Client.Authentication = c.Authentication\n\n\terr = c.Sanitize()\n\trequire.NoError(t, err)\n\n\t// Disable the following fields. They include unexported fields, pointers and mutexes.\n\tc.LogOutput = nil\n\tlc.LogOutput = nil\n\tc.Logger = nil\n\tlc.Logger = nil\n\tc.Client.Dialer = nil\n\tlc.Client.Dialer = nil\n\n\trequire.Equal(t, c, lc)\n}\n\nfunc TestConfig_Initialize(t *testing.T) {\n\tc := &Config{}\n\trequire.NoError(t, c.Sanitize())\n\trequire.NoError(t, c.Validate())\n}\n"
  },
  {
    "path": "config/dmap.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"fmt\"\n\t\"time\"\n)\n\n// EvictionPolicy denotes eviction policy. Currently: LRU or NONE.\ntype EvictionPolicy string\n\n// Important note on DMap and DMaps structs:\n// Golang does not provide the typical notion of inheritance.\n// because of that I preferred to define the types explicitly.\n\n// DMap denotes configuration for a particular distributed map. Most of the\n// fields are related with distributed cache implementation.\ntype DMap struct {\n\t// Engine contains storage engine configuration and their implementations.\n\t// If you don't have a custom storage engine implementation or configuration for\n\t// the default one, just leave it empty.\n\tEngine *Engine\n\n\t// MaxIdleDuration denotes maximum time for each entry to stay idle in the\n\t// DMap. It limits the lifetime of the entries relative to the time of the\n\t// last read or write access performed on them. The entries whose idle period\n\t// exceeds this limit are expired and evicted automatically. An entry is idle\n\t// if no Get, GetEntry, Put, Expire on it. 
Configuration\n\t// of MaxIdleDuration feature varies by preferred deployment method.\n\tMaxIdleDuration time.Duration\n\n\t// TTLDuration is useful to set a default TTL for every key/value pair a DMap\n\t// instance.\n\tTTLDuration time.Duration\n\n\t// MaxKeys denotes maximum key count on a particular node. So if you have 10\n\t// nodes with MaxKeys=100000, your key count in the cluster should be around\n\t// MaxKeys*10=1000000\n\tMaxKeys int\n\n\t// MaxInuse denotes maximum amount of in-use memory on a particular node. So\n\t// if you have 10 nodes with MaxInuse=100M (it has to be in bytes), amount of\n\t// in-use memory should be around MaxInuse*10=1G\n\tMaxInuse int\n\n\t// LRUSamples denotes amount of randomly selected key count by the approximate\n\t// LRU implementation. Lower values are better for high performance. It's 5\n\t// by default.\n\tLRUSamples int\n\n\t// EvictionPolicy determines the eviction policy in use. It's NONE by default.\n\t// Set as LRU to enable LRU eviction policy.\n\tEvictionPolicy EvictionPolicy\n}\n\n// Sanitize sets default values to empty configuration variables, if it's possible.\nfunc (dm *DMap) Sanitize() error {\n\tif dm.EvictionPolicy == \"\" {\n\t\tdm.EvictionPolicy = \"NONE\"\n\t}\n\tif dm.LRUSamples <= 0 {\n\t\tdm.LRUSamples = DefaultLRUSamples\n\t}\n\tif dm.MaxInuse < 0 {\n\t\tdm.MaxInuse = 0\n\t}\n\tif dm.MaxKeys < 0 {\n\t\tdm.MaxKeys = 0\n\t}\n\n\tif dm.Engine == nil {\n\t\tdm.Engine = NewEngine()\n\t}\n\n\tif err := dm.Engine.Sanitize(); err != nil {\n\t\treturn fmt.Errorf(\"failed to sanitize storage engine configuration: %w\", err)\n\t}\n\n\treturn nil\n}\n\n// Validate finds errors in the current configuration.\nfunc (dm *DMap) Validate() error {\n\tif err := dm.Engine.Validate(); err != nil {\n\t\treturn fmt.Errorf(\"failed to validate storage engine configuration: %w\", err)\n\t}\n\n\treturn nil\n}\n\nvar _ IConfig = (*DMap)(nil)\n"
  },
  {
    "path": "config/dmap_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestConfig_DMap(t *testing.T) {\n\td := &DMap{\n\t\tMaxInuse:   -1,\n\t\tMaxKeys:    -1,\n\t\tLRUSamples: -1,\n\t}\n\trequire.NoError(t, d.Sanitize())\n\trequire.NoError(t, d.Validate())\n\n\trequire.Greater(t, d.MaxInuse, -1)\n\trequire.Greater(t, d.MaxKeys, -1)\n\trequire.Equal(t, DefaultLRUSamples, d.LRUSamples)\n\trequire.Equal(t, EvictionPolicy(\"NONE\"), d.EvictionPolicy)\n\trequire.NotNil(t, d.Engine)\n}\n"
  },
  {
    "path": "config/dmaps.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"time\"\n)\n\n// DMaps denotes a global configuration for DMaps. You can still overwrite it by\n// setting a DMap for a particular distributed map via Custom field. Most of the\n// fields are related with distributed cache implementation.\ntype DMaps struct {\n\t// Engine contains configuration for a storage engine implementation. It may contain the implementation.\n\t// See Engine itself.\n\tEngine *Engine\n\n\t// NumEvictionWorkers denotes the number of goroutines that are used to find\n\t// keys for eviction. This is a global configuration variable. So you cannot set\n\t//\t// different values per DMap.\n\tNumEvictionWorkers int64\n\n\t// MaxIdleDuration denotes maximum time for each entry to stay idle in the DMap.\n\t// It limits the lifetime of the entries relative to the time of the last\n\t// read or write access performed on them. The entries whose idle period exceeds\n\t// this limit are expired and evicted automatically. An entry is idle if no Get,\n\t// Put, Expire on it. 
Configuration of MaxIdleDuration feature varies by preferred\n\t// deployment method.\n\tMaxIdleDuration time.Duration\n\n\t// TTLDuration is useful to set a default TTL for every key/value pair a\n\t// distributed map instance.\n\tTTLDuration time.Duration\n\n\t// MaxKeys denotes maximum key count on a particular node. So if you have 10\n\t// nodes with MaxKeys=100000, your key count in the cluster should be around\n\t// MaxKeys*10=1000000\n\tMaxKeys int\n\n\t// MaxInuse denotes maximum amount of in-use memory on a particular node.\n\t// So if you have 10 nodes with MaxInuse=100M (it has to be in bytes), amount\n\t// of in-use memory should be around MaxInuse*10=1G\n\tMaxInuse int\n\n\t// LRUSamples denotes amount of randomly selected key count by the approximate\n\t// LRU implementation. Lower values are better for high performance. It's\n\t// 5 by default.\n\tLRUSamples int\n\n\t// EvictionPolicy determines the eviction policy in use. It's NONE by default.\n\t// Set as LRU to enable LRU eviction policy.\n\tEvictionPolicy EvictionPolicy\n\n\t// CheckEmptyFragmentsInterval is the interval between two sequential calls of empty\n\t// fragment cleaner. This is a global configuration variable. So you cannot set\n\t// different values per DMap.\n\tCheckEmptyFragmentsInterval time.Duration\n\n\t// TriggerCompactionInterval is interval between two sequential call of compaction worker.\n\t// This is a global configuration variable. 
So you cannot set\n\t// different values per DMap.\n\tTriggerCompactionInterval time.Duration\n\n\t// Custom is useful to set custom cache config per DMap instance.\n\tCustom map[string]DMap\n}\n\n// Sanitize sets default values to empty configuration variables, if it's possible.\nfunc (dm *DMaps) Sanitize() error {\n\tif dm.Engine == nil {\n\t\tdm.Engine = NewEngine()\n\t}\n\n\tif dm.Custom == nil {\n\t\tdm.Custom = make(map[string]DMap)\n\t}\n\n\tif dm.EvictionPolicy == \"\" {\n\t\tdm.EvictionPolicy = \"NONE\"\n\t}\n\n\tif dm.LRUSamples <= 0 {\n\t\tdm.LRUSamples = DefaultLRUSamples\n\t}\n\n\tif dm.MaxInuse < 0 {\n\t\tdm.MaxInuse = 0\n\t}\n\n\tif dm.MaxKeys < 0 {\n\t\tdm.MaxKeys = 0\n\t}\n\n\tif dm.NumEvictionWorkers <= 0 {\n\t\tdm.NumEvictionWorkers = int64(runtime.NumCPU())\n\t}\n\n\tif dm.CheckEmptyFragmentsInterval.Microseconds() == 0 {\n\t\tdm.CheckEmptyFragmentsInterval = DefaultCheckEmptyFragmentsInterval\n\t}\n\n\tif dm.TriggerCompactionInterval.Microseconds() == 0 {\n\t\tdm.TriggerCompactionInterval = DefaultTriggerCompactionInterval\n\t}\n\n\tfor _, d := range dm.Custom {\n\t\tif err := d.Sanitize(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := dm.Engine.Sanitize(); err != nil {\n\t\treturn fmt.Errorf(\"failed to sanitize storage engine configuration: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (dm *DMaps) Validate() error {\n\tif err := dm.Engine.Validate(); err != nil {\n\t\treturn fmt.Errorf(\"failed to validate storage engine configuration: %w\", err)\n\t}\n\treturn nil\n}\n\nvar _ IConfig = (*DMaps)(nil)\n"
  },
  {
    "path": "config/engine.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/olric-data/olric/internal/ramblock\"\n\t\"github.com/olric-data/olric/pkg/storage\"\n)\n\n// Engine contains storage engine configuration and their implementations.\n// If you don't have a custom storage engine implementation or configuration for\n// the default one, just call NewStorageEngine() function to use it with sane defaults.\ntype Engine struct {\n\tName string\n\n\tImplementation storage.Engine\n\n\t// Config is a map that contains configuration of the storage engines, for\n\t// both plugins and imported ones. 
If you want to use a storage engine other\n\t// than the default one, you must set configuration for it.\n\tConfig map[string]interface{}\n}\n\n// NewEngine initializes Engine with sane defaults.\n// Olric will set its own storage engine implementation and related configuration,\n// if there is no other engine.\nfunc NewEngine() *Engine {\n\treturn &Engine{\n\t\tConfig: make(map[string]interface{}),\n\t}\n}\n\n// Validate finds errors in the current configuration.\nfunc (s *Engine) Validate() error {\n\tif s.Config == nil {\n\t\ts.Config = make(map[string]interface{})\n\t}\n\treturn nil\n}\n\n// Sanitize sets default values to empty configuration variables, if it's possible.\nfunc (s *Engine) Sanitize() error {\n\tif s.Name == \"\" {\n\t\ts.Name = DefaultStorageEngine\n\t}\n\n\t// Backward compatibility: accept the old name \"kvstore\"\n\tif s.Name == \"kvstore\" {\n\t\ts.Name = DefaultStorageEngine\n\t}\n\n\tif s.Implementation == nil {\n\t\tswitch s.Name {\n\t\tcase DefaultStorageEngine:\n\t\t\tcfg := ramblock.DefaultConfig().ToMap()\n\t\t\tfor key, value := range cfg {\n\t\t\t\t_, ok := s.Config[key]\n\t\t\t\tif !ok {\n\t\t\t\t\ts.Config[key] = value\n\t\t\t\t}\n\t\t\t}\n\t\t\tkv, err := ramblock.New(storage.NewConfig(s.Config))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ts.Implementation = kv\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unknown storage engine: %s\", s.Name)\n\t\t}\n\t} else {\n\t\ts.Name = s.Implementation.Name()\n\t}\n\treturn nil\n}\n\n// Interface guard\nvar _ IConfig = (*Engine)(nil)\n"
  },
  {
    "path": "config/engine_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestEngine_KVStore_Backward_Compat(t *testing.T) {\n\te := NewEngine()\n\te.Name = \"kvstore\"\n\n\trequire.NoError(t, e.Sanitize())\n\trequire.NoError(t, e.Validate())\n\trequire.Equal(t, DefaultStorageEngine, e.Name)\n\trequire.NotNil(t, e.Implementation)\n}\n\nfunc TestEngine_Dont_Overwrite_TableSize(t *testing.T) {\n\te := NewEngine()\n\te.Name = DefaultStorageEngine\n\te.Config = map[string]interface{}{\n\t\t\"tableSize\": 1235,\n\t}\n\n\trequire.NoError(t, e.Sanitize())\n\trequire.NoError(t, e.Validate())\n\trequire.Equal(t, 1235, e.Config[\"tableSize\"])\n}\n"
  },
  {
    "path": "config/internal/loader/loader.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage loader\n\nimport \"gopkg.in/yaml.v2\"\n\ntype server struct {\n\tName                       string  `yaml:\"name\"`\n\tBindAddr                   string  `yaml:\"bindAddr\"`\n\tBindPort                   int     `yaml:\"bindPort\"`\n\tInterface                  string  `yaml:\"interface\"`\n\tReplicationMode            int     `yaml:\"replicationMode\"`\n\tPartitionCount             uint64  `yaml:\"partitionCount\"`\n\tLoadFactor                 float64 `yaml:\"loadFactor\"`\n\tKeepAlivePeriod            string  `yaml:\"keepAlivePeriod\"`\n\tIdleClose                  string  `yaml:\"idleClose\"`\n\tBootstrapTimeout           string  `yaml:\"bootstrapTimeout\"`\n\tReplicaCount               int     `yaml:\"replicaCount\"`\n\tWriteQuorum                int     `yaml:\"writeQuorum\"`\n\tReadQuorum                 int     `yaml:\"readQuorum\"`\n\tReadRepair                 bool    `yaml:\"readRepair\"`\n\tMemberCountQuorum          int32   `yaml:\"memberCountQuorum\"`\n\tRoutingTablePushInterval   string  `yaml:\"routingTablePushInterval\"`\n\tTriggerBalancerInterval    string  `yaml:\"triggerBalancerInterval\"`\n\tLeaveTimeout               string  `yaml:\"leaveTimeout\"`\n\tEnableClusterEventsChannel bool    `yaml:\"enableClusterEventsChannel\"`\n}\n\ntype authentication struct {\n\tPassword string `yaml:\"password\"`\n}\n\ntype client 
struct {\n\tDialTimeout     string `yaml:\"dialTimeout\"`\n\tReadTimeout     string `yaml:\"readTimeout\"`\n\tWriteTimeout    string `yaml:\"writeTimeout\"`\n\tMaxRetries      int    `yaml:\"maxRetries\"`\n\tMinRetryBackoff string `yaml:\"minRetryBackoff\"`\n\tMaxRetryBackoff string `yaml:\"maxRetryBackoff\"`\n\tPoolFIFO        bool   `yaml:\"poolFIFO\"`\n\tPoolSize        int    `yaml:\"poolSize\"`\n\tMinIdleConns    int    `yaml:\"minIdleConns\"`\n\tMaxConnAge      string `yaml:\"maxConnAge\"`\n\tPoolTimeout     string `yaml:\"poolTimeout\"`\n\tIdleTimeout     string `yaml:\"idleTimeout\"`\n}\n\n// logging contains configuration variables of logging section of config file.\ntype logging struct {\n\tVerbosity int32  `yaml:\"verbosity\"`\n\tLevel     string `yaml:\"level\"`\n\tOutput    string `yaml:\"output\"`\n}\n\ntype memberlist struct {\n\tEnvironment             string   `yaml:\"environment\"` // required\n\tBindAddr                string   `yaml:\"bindAddr\"`    // required\n\tBindPort                int      `yaml:\"bindPort\"`    // required\n\tInterface               string   `yaml:\"interface\"`\n\tEnableCompression       *bool    `yaml:\"enableCompression\"`\n\tJoinRetryInterval       string   `yaml:\"joinRetryInterval\"` // required\n\tMaxJoinAttempts         int      `yaml:\"maxJoinAttempts\"`   // required\n\tPeers                   []string `yaml:\"peers\"`\n\tIndirectChecks          *int     `yaml:\"indirectChecks\"`\n\tRetransmitMult          *int     `yaml:\"retransmitMult\"`\n\tSuspicionMult           *int     `yaml:\"suspicionMult\"`\n\tTCPTimeout              *string  `yaml:\"tcpTimeout\"`\n\tPushPullInterval        *string  `yaml:\"pushPullInterval\"`\n\tProbeTimeout            *string  `yaml:\"probeTimeout\"`\n\tProbeInterval           *string  `yaml:\"probeInterval\"`\n\tGossipInterval          *string  `yaml:\"gossipInterval\"`\n\tGossipToTheDeadTime     *string  `yaml:\"gossipToTheDeadTime\"`\n\tAdvertiseAddr           *string  
`yaml:\"advertiseAddr\"`\n\tAdvertisePort           *int     `yaml:\"advertisePort\"`\n\tSuspicionMaxTimeoutMult *int     `yaml:\"suspicionMaxTimeoutMult\"`\n\tDisableTCPPings         *bool    `yaml:\"disableTCPPings\"`\n\tAwarenessMaxMultiplier  *int     `yaml:\"awarenessMaxMultiplier\"`\n\tGossipNodes             *int     `yaml:\"gossipNodes\"`\n\tGossipVerifyIncoming    *bool    `yaml:\"gossipVerifyIncoming\"`\n\tGossipVerifyOutgoing    *bool    `yaml:\"gossipVerifyOutgoing\"`\n\tDNSConfigPath           *string  `yaml:\"dnsConfigPath\"`\n\tHandoffQueueDepth       *int     `yaml:\"handoffQueueDepth\"`\n\tUDPBufferSize           *int     `yaml:\"udpBufferSize\"`\n}\n\ntype engine struct {\n\tName   string                 `yaml:\"name\"`\n\tConfig map[string]interface{} `yaml:\"config\"`\n}\n\ntype dmap struct {\n\tEngine          *engine `yaml:\"engine\"`\n\tMaxIdleDuration string  `yaml:\"maxIdleDuration\"`\n\tTTLDuration     string  `yaml:\"ttlDuration\"`\n\tMaxKeys         int     `yaml:\"maxKeys\"`\n\tMaxInuse        int     `yaml:\"maxInuse\"`\n\tLRUSamples      int     `yaml:\"lruSamples\"`\n\tEvictionPolicy  string  `yaml:\"evictionPolicy\"`\n}\n\ntype dmaps struct {\n\tEngine                      *engine         `yaml:\"engine\"`\n\tNumEvictionWorkers          int64           `yaml:\"numEvictionWorkers\"`\n\tMaxIdleDuration             string          `yaml:\"maxIdleDuration\"`\n\tTTLDuration                 string          `yaml:\"ttlDuration\"`\n\tMaxKeys                     int             `yaml:\"maxKeys\"`\n\tMaxInuse                    int             `yaml:\"maxInuse\"`\n\tLRUSamples                  int             `yaml:\"lruSamples\"`\n\tEvictionPolicy              string          `yaml:\"evictionPolicy\"`\n\tCheckEmptyFragmentsInterval string          `yaml:\"checkEmptyFragmentsInterval\"`\n\tTriggerCompactionInterval   string          `yaml:\"triggerCompactionInterval\"`\n\tCustom                      map[string]dmap 
`yaml:\"custom\"`\n}\n\ntype serviceDiscovery map[string]interface{}\n\n// Loader is the main configuration struct\ntype Loader struct {\n\tMemberlist       memberlist       `yaml:\"memberlist\"`\n\tLogging          logging          `yaml:\"logging\"`\n\tServer           server           `yaml:\"server\"`\n\tClient           client           `yaml:\"client\"`\n\tDMaps            dmaps            `yaml:\"dmaps\"`\n\tServiceDiscovery serviceDiscovery `yaml:\"serviceDiscovery\"`\n\tAuthentication   authentication   `yaml:\"authentication\"`\n}\n\n// New tries to read Olric configuration from a YAML file.\nfunc New(data []byte) (*Loader, error) {\n\tvar lc Loader\n\tif err := yaml.Unmarshal(data, &lc); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &lc, nil\n}\n"
  },
  {
    "path": "config/load.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"time\"\n\n\t\"github.com/hashicorp/memberlist\"\n\t\"github.com/olric-data/olric/config/internal/loader\"\n\t\"github.com/olric-data/olric/hasher\"\n\t\"github.com/pkg/errors\"\n)\n\n// mapYamlToConfig maps a parsed YAML to related configuration struct.\nfunc mapYamlToConfig(rawDst, rawSrc interface{}) error {\n\tdst := reflect.ValueOf(rawDst).Elem()\n\tsrc := reflect.ValueOf(rawSrc).Elem()\n\tfor j := 0; j < src.NumField(); j++ {\n\t\tfor i := 0; i < dst.NumField(); i++ {\n\t\t\tif src.Type().Field(j).Name == dst.Type().Field(i).Name {\n\t\t\t\tif src.Field(j).Kind() == dst.Field(i).Kind() {\n\t\t\t\t\tdst.Field(i).Set(src.Field(j))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// Special cases\n\t\t\t\tif dst.Field(i).Type() == reflect.TypeOf(time.Duration(0)) {\n\t\t\t\t\trawValue := src.Field(j).String()\n\t\t\t\t\tif rawValue != \"\" {\n\t\t\t\t\t\tvalue, err := time.ParseDuration(rawValue)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdst.Field(i).Set(reflect.ValueOf(value))\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\treturn fmt.Errorf(\"failed to map %s to an appropriate field in config\", dst.Type().Field(j).Name)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc loadDMapConfig(c *loader.Loader) 
(*DMaps, error) {\n\tres := &DMaps{}\n\tif c.DMaps.MaxIdleDuration != \"\" {\n\t\tmaxIdleDuration, err := time.ParseDuration(c.DMaps.MaxIdleDuration)\n\t\tif err != nil {\n\t\t\treturn nil, errors.WithMessage(err, \"failed to parse dmap.MaxIdleDuration\")\n\t\t}\n\t\tres.MaxIdleDuration = maxIdleDuration\n\t}\n\n\tif c.DMaps.TTLDuration != \"\" {\n\t\tttlDuration, err := time.ParseDuration(c.DMaps.TTLDuration)\n\t\tif err != nil {\n\t\t\treturn nil, errors.WithMessage(err, \"failed to parse dmap.TTLDuration\")\n\t\t}\n\t\tres.TTLDuration = ttlDuration\n\t}\n\n\tif c.DMaps.CheckEmptyFragmentsInterval != \"\" {\n\t\tcheckEmptyFragmentsInterval, err := time.ParseDuration(c.DMaps.CheckEmptyFragmentsInterval)\n\t\tif err != nil {\n\t\t\treturn nil, errors.WithMessage(err, \"failed to parse dmap.CheckEmptyFragmentsInterval\")\n\t\t}\n\t\tres.CheckEmptyFragmentsInterval = checkEmptyFragmentsInterval\n\t}\n\n\tif c.DMaps.TriggerCompactionInterval != \"\" {\n\t\ttriggerCompactionInterval, err := time.ParseDuration(c.DMaps.TriggerCompactionInterval)\n\t\tif err != nil {\n\t\t\treturn nil, errors.WithMessage(err, \"failed to parse dmap.triggerCompactionInterval\")\n\t\t}\n\t\tres.TriggerCompactionInterval = triggerCompactionInterval\n\t}\n\n\tres.NumEvictionWorkers = c.DMaps.NumEvictionWorkers\n\tres.MaxKeys = c.DMaps.MaxKeys\n\tres.MaxInuse = c.DMaps.MaxInuse\n\tres.EvictionPolicy = EvictionPolicy(c.DMaps.EvictionPolicy)\n\tres.LRUSamples = c.DMaps.LRUSamples\n\n\tif c.DMaps.Engine != nil {\n\t\te := NewEngine()\n\t\te.Name = c.DMaps.Engine.Name\n\t\te.Config = c.DMaps.Engine.Config\n\t\tres.Engine = e\n\t}\n\n\tif c.DMaps.Custom != nil {\n\t\tres.Custom = make(map[string]DMap)\n\t\tfor name, dc := range c.DMaps.Custom {\n\t\t\tcc := DMap{\n\t\t\t\tMaxInuse:       dc.MaxInuse,\n\t\t\t\tMaxKeys:        dc.MaxKeys,\n\t\t\t\tEvictionPolicy: EvictionPolicy(dc.EvictionPolicy),\n\t\t\t\tLRUSamples:     dc.LRUSamples,\n\t\t\t}\n\t\t\tif dc.Engine != nil {\n\t\t\t\te := 
NewEngine()\n\t\t\t\te.Name = dc.Engine.Name\n\t\t\t\te.Config = dc.Engine.Config\n\t\t\t\tcc.Engine = e\n\t\t\t}\n\t\t\tif dc.MaxIdleDuration != \"\" {\n\t\t\t\tmaxIdleDuration, err := time.ParseDuration(dc.MaxIdleDuration)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, errors.WithMessagef(err, \"failed to parse dmaps.%s.MaxIdleDuration\", name)\n\t\t\t\t}\n\t\t\t\tcc.MaxIdleDuration = maxIdleDuration\n\t\t\t}\n\t\t\tif dc.TTLDuration != \"\" {\n\t\t\t\tttlDuration, err := time.ParseDuration(dc.TTLDuration)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, errors.WithMessagef(err, \"failed to parse dmaps.%s.TTLDuration\", name)\n\t\t\t\t}\n\t\t\t\tcc.TTLDuration = ttlDuration\n\t\t\t}\n\t\t\tres.Custom[name] = cc\n\t\t}\n\t}\n\treturn res, nil\n}\n\n// loadMemberlistConfig creates a new *memberlist.Config by parsing olric.yaml\nfunc loadMemberlistConfig(c *loader.Loader, mc *memberlist.Config) (*memberlist.Config, error) {\n\tvar err error\n\tif c.Memberlist.BindAddr == \"\" {\n\t\tname, err := os.Hostname()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tc.Memberlist.BindAddr = name\n\t}\n\tmc.BindAddr = c.Memberlist.BindAddr\n\tmc.BindPort = c.Memberlist.BindPort\n\n\tif c.Memberlist.EnableCompression != nil {\n\t\tmc.EnableCompression = *c.Memberlist.EnableCompression\n\t}\n\n\tif c.Memberlist.TCPTimeout != nil {\n\t\tmc.TCPTimeout, err = time.ParseDuration(*c.Memberlist.TCPTimeout)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif c.Memberlist.IndirectChecks != nil {\n\t\tmc.IndirectChecks = *c.Memberlist.IndirectChecks\n\t}\n\n\tif c.Memberlist.RetransmitMult != nil {\n\t\tmc.RetransmitMult = *c.Memberlist.RetransmitMult\n\t}\n\n\tif c.Memberlist.SuspicionMult != nil {\n\t\tmc.SuspicionMult = *c.Memberlist.SuspicionMult\n\t}\n\n\tif c.Memberlist.PushPullInterval != nil {\n\t\tmc.PushPullInterval, err = time.ParseDuration(*c.Memberlist.PushPullInterval)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif 
c.Memberlist.ProbeTimeout != nil {\n\t\tmc.ProbeTimeout, err = time.ParseDuration(*c.Memberlist.ProbeTimeout)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif c.Memberlist.ProbeInterval != nil {\n\t\tmc.ProbeInterval, err = time.ParseDuration(*c.Memberlist.ProbeInterval)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif c.Memberlist.GossipInterval != nil {\n\t\tmc.GossipInterval, err = time.ParseDuration(*c.Memberlist.GossipInterval)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif c.Memberlist.GossipToTheDeadTime != nil {\n\t\tmc.GossipToTheDeadTime, err = time.ParseDuration(*c.Memberlist.GossipToTheDeadTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif c.Memberlist.AdvertiseAddr != nil {\n\t\tmc.AdvertiseAddr = *c.Memberlist.AdvertiseAddr\n\t}\n\n\tif c.Memberlist.AdvertisePort != nil {\n\t\tmc.AdvertisePort = *c.Memberlist.AdvertisePort\n\t} else {\n\t\tmc.AdvertisePort = mc.BindPort\n\t}\n\n\tif c.Memberlist.SuspicionMaxTimeoutMult != nil {\n\t\tmc.SuspicionMaxTimeoutMult = *c.Memberlist.SuspicionMaxTimeoutMult\n\t}\n\n\tif c.Memberlist.DisableTCPPings != nil {\n\t\tmc.DisableTcpPings = *c.Memberlist.DisableTCPPings\n\t}\n\n\tif c.Memberlist.AwarenessMaxMultiplier != nil {\n\t\tmc.AwarenessMaxMultiplier = *c.Memberlist.AwarenessMaxMultiplier\n\t}\n\n\tif c.Memberlist.GossipNodes != nil {\n\t\tmc.GossipNodes = *c.Memberlist.GossipNodes\n\t}\n\tif c.Memberlist.GossipVerifyIncoming != nil {\n\t\tmc.GossipVerifyIncoming = *c.Memberlist.GossipVerifyIncoming\n\t}\n\tif c.Memberlist.GossipVerifyOutgoing != nil {\n\t\tmc.GossipVerifyOutgoing = *c.Memberlist.GossipVerifyOutgoing\n\t}\n\n\tif c.Memberlist.DNSConfigPath != nil {\n\t\tmc.DNSConfigPath = *c.Memberlist.DNSConfigPath\n\t}\n\n\tif c.Memberlist.HandoffQueueDepth != nil {\n\t\tmc.HandoffQueueDepth = *c.Memberlist.HandoffQueueDepth\n\t}\n\tif c.Memberlist.UDPBufferSize != nil {\n\t\tmc.UDPBufferSize = *c.Memberlist.UDPBufferSize\n\t}\n\treturn mc, 
nil\n}\n\n// Load reads and loads Olric configuration.\nfunc Load(filename string) (*Config, error) {\n\tif _, err := os.Stat(filename); os.IsNotExist(err) {\n\t\treturn nil, fmt.Errorf(\"file doesn't exist: %s\", filename)\n\t}\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\t_ = f.Close()\n\t}()\n\tdata, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc, err := loader.New(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar logOutput io.Writer\n\tswitch {\n\tcase c.Logging.Output == \"stderr\":\n\t\tlogOutput = os.Stderr\n\tcase c.Logging.Output == \"stdout\":\n\t\tlogOutput = os.Stdout\n\tdefault:\n\t\tlogOutput = os.Stderr\n\t}\n\n\tif c.Logging.Level == \"\" {\n\t\tc.Logging.Level = DefaultLogLevel\n\t}\n\n\trawMc, err := NewMemberlistConfig(c.Memberlist.Environment)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmemberlistConfig, err := loadMemberlistConfig(c, rawMc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar (\n\t\tjoinRetryInterval,\n\t\tkeepAlivePeriod,\n\t\tidleClose,\n\t\tbootstrapTimeout,\n\t\ttriggerBalancerInterval,\n\t\tleaveTimeout,\n\t\troutingTablePushInterval time.Duration\n\t)\n\n\tif c.Server.KeepAlivePeriod != \"\" {\n\t\tkeepAlivePeriod, err = time.ParseDuration(c.Server.KeepAlivePeriod)\n\t\tif err != nil {\n\t\t\treturn nil, errors.WithMessage(err,\n\t\t\t\tfmt.Sprintf(\"failed to parse server.keepAlivePeriod: '%s'\", c.Server.KeepAlivePeriod))\n\t\t}\n\t}\n\n\tif c.Server.IdleClose != \"\" {\n\t\tidleClose, err = time.ParseDuration(c.Server.IdleClose)\n\t\tif err != nil {\n\t\t\treturn nil, errors.WithMessage(err,\n\t\t\t\tfmt.Sprintf(\"failed to parse server.idleClose: '%s'\", c.Server.IdleClose))\n\t\t}\n\t}\n\n\tif c.Server.BootstrapTimeout != \"\" {\n\t\tbootstrapTimeout, err = time.ParseDuration(c.Server.BootstrapTimeout)\n\t\tif err != nil {\n\t\t\treturn nil, errors.WithMessage(err,\n\t\t\t\tfmt.Sprintf(\"failed to parse server.bootstrapTimeout: '%s'\", 
c.Server.BootstrapTimeout))\n\t\t}\n\t}\n\tif c.Memberlist.JoinRetryInterval != \"\" {\n\t\tjoinRetryInterval, err = time.ParseDuration(c.Memberlist.JoinRetryInterval)\n\t\tif err != nil {\n\t\t\treturn nil, errors.WithMessage(err,\n\t\t\t\tfmt.Sprintf(\"failed to parse memberlist.joinRetryInterval: '%s'\",\n\t\t\t\t\tc.Memberlist.JoinRetryInterval))\n\t\t}\n\t}\n\tif c.Server.RoutingTablePushInterval != \"\" {\n\t\troutingTablePushInterval, err = time.ParseDuration(c.Server.RoutingTablePushInterval)\n\t\tif err != nil {\n\t\t\treturn nil, errors.WithMessage(err,\n\t\t\t\tfmt.Sprintf(\"failed to parse server.routingTablePushInterval: '%s'\", c.Server.RoutingTablePushInterval))\n\t\t}\n\t}\n\n\tif c.Server.TriggerBalancerInterval != \"\" {\n\t\ttriggerBalancerInterval, err = time.ParseDuration(c.Server.TriggerBalancerInterval)\n\t\tif err != nil {\n\t\t\treturn nil, errors.WithMessage(err,\n\t\t\t\tfmt.Sprintf(\"failed to parse server.triggerBalancerInterval: '%s'\", c.Server.TriggerBalancerInterval))\n\t\t}\n\t}\n\n\tif c.Server.LeaveTimeout != \"\" {\n\t\tleaveTimeout, err = time.ParseDuration(c.Server.LeaveTimeout)\n\t\tif err != nil {\n\t\t\treturn nil, errors.WithMessage(err,\n\t\t\t\tfmt.Sprintf(\"failed to parse server.leaveTimeout: '%s'\", c.Server.LeaveTimeout))\n\t\t}\n\t}\n\n\tclientConfig := Client{\n\t\tAuthentication: &Authentication{\n\t\t\tPassword: c.Authentication.Password,\n\t\t},\n\t}\n\terr = mapYamlToConfig(&clientConfig, &c.Client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdmapConfig, err := loadDMapConfig(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcfg := &Config{\n\t\tBindAddr:                   c.Server.BindAddr,\n\t\tBindPort:                   c.Server.BindPort,\n\t\tInterface:                  c.Server.Interface,\n\t\tServiceDiscovery:           c.ServiceDiscovery,\n\t\tMemberlistInterface:        c.Memberlist.Interface,\n\t\tMemberlistConfig:           memberlistConfig,\n\t\tClient:                     
&clientConfig,\n\t\tLogLevel:                   c.Logging.Level,\n\t\tJoinRetryInterval:          joinRetryInterval,\n\t\tRoutingTablePushInterval:   routingTablePushInterval,\n\t\tTriggerBalancerInterval:    triggerBalancerInterval,\n\t\tEnableClusterEventsChannel: c.Server.EnableClusterEventsChannel,\n\t\tMaxJoinAttempts:            c.Memberlist.MaxJoinAttempts,\n\t\tPeers:                      c.Memberlist.Peers,\n\t\tPartitionCount:             c.Server.PartitionCount,\n\t\tReplicaCount:               c.Server.ReplicaCount,\n\t\tWriteQuorum:                c.Server.WriteQuorum,\n\t\tReadQuorum:                 c.Server.ReadQuorum,\n\t\tReplicationMode:            c.Server.ReplicationMode,\n\t\tReadRepair:                 c.Server.ReadRepair,\n\t\tLoadFactor:                 c.Server.LoadFactor,\n\t\tMemberCountQuorum:          c.Server.MemberCountQuorum,\n\t\tLogger:                     log.New(logOutput, \"\", log.LstdFlags),\n\t\tLogOutput:                  logOutput,\n\t\tLogVerbosity:               c.Logging.Verbosity,\n\t\tHasher:                     hasher.NewDefaultHasher(),\n\t\tKeepAlivePeriod:            keepAlivePeriod,\n\t\tIdleClose:                  idleClose,\n\t\tBootstrapTimeout:           bootstrapTimeout,\n\t\tLeaveTimeout:               leaveTimeout,\n\t\tDMaps:                      dmapConfig,\n\t\tAuthentication: &Authentication{\n\t\t\tPassword: c.Authentication.Password,\n\t\t},\n\t}\n\n\tif err := cfg.Sanitize(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := cfg.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn cfg, nil\n}\n"
  },
  {
    "path": "config/memberlist.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"strings\"\n\n\t\"github.com/hashicorp/go-multierror\"\n\t\"github.com/hashicorp/memberlist\"\n)\n\nfunc (c *Config) validateMemberlistConfig() error {\n\tvar result error\n\tif c.MemberlistConfig.AdvertiseAddr != \"\" {\n\t\tif ip := net.ParseIP(c.MemberlistConfig.AdvertiseAddr); ip == nil {\n\t\t\tresult = multierror.Append(result,\n\t\t\t\tfmt.Errorf(\"memberlist: AdvertiseAddr has to be a valid IPv4 or IPv6 address\"))\n\t\t}\n\t}\n\tif c.MemberlistConfig.BindAddr == \"\" {\n\t\tresult = multierror.Append(result,\n\t\t\tfmt.Errorf(\"memberlist: BindAddr cannot be an empty string\"))\n\t}\n\treturn result\n}\n\n// NewMemberlistConfig returns a new memberlist.Config for a given environment.\n//\n// It takes an env parameter: local, lan and wan.\n//\n// local:\n// DefaultLocalConfig works like DefaultConfig, however it returns a configuration that\n// is optimized for a local loopback environments. The default configuration is still very conservative\n// and errs on the side of caution.\n//\n// lan:\n// DefaultLANConfig returns a sane set of configurations for Memberlist. 
It uses the hostname\n// as the node name, and otherwise sets very conservative values that are sane for most LAN environments.\n// The default configuration errs on the side of caution, choosing values that are optimized for higher convergence\n// at the cost of higher bandwidth usage. Regardless, these values are a good starting point when getting started with\n// memberlist.\n//\n// wan:\n// DefaultWANConfig works like DefaultConfig, however it returns a configuration that is optimized for most WAN\n// environments. The default configuration is still very conservative and errs on the side of caution.\nfunc NewMemberlistConfig(env string) (*memberlist.Config, error) {\n\te := strings.ToLower(env)\n\tswitch e {\n\tcase \"local\":\n\t\treturn memberlist.DefaultLocalConfig(), nil\n\tcase \"lan\":\n\t\treturn memberlist.DefaultLANConfig(), nil\n\tcase \"wan\":\n\t\treturn memberlist.DefaultWANConfig(), nil\n\t}\n\treturn nil, fmt.Errorf(\"unknown env: %s\", env)\n}\n"
  },
  {
    "path": "config/network.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"runtime\"\n\t\"strconv\"\n\n\t\"github.com/hashicorp/go-sockaddr\"\n)\n\n// The following functions are mostly extracted from Serf. See setupAgent function in cmd/serf/command/agent/command.go\n// Thanks for the extraordinary software.\n//\n// Source: https://github.com/hashicorp/serf/blob/master/cmd/serf/command/agent/command.go#L204\n\nfunc addrParts(address string) (string, int, error) {\n\t// Get the address\n\taddr, err := net.ResolveTCPAddr(\"tcp\", address)\n\tif err != nil {\n\t\treturn \"\", 0, err\n\t}\n\n\treturn addr.IP.String(), addr.Port, nil\n}\n\nfunc getBindIPFromNetworkInterface(addrs []net.Addr) (string, error) {\n\tfor _, a := range addrs {\n\t\tvar addrIP net.IP\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\t// Waiting for https://github.com/golang/go/issues/5395 to use IPNet only\n\t\t\taddr, ok := a.(*net.IPAddr)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\taddrIP = addr.IP\n\t\t} else {\n\t\t\taddr, ok := a.(*net.IPNet)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\taddrIP = addr.IP\n\t\t}\n\n\t\t// Skip self-assigned IPs\n\t\tif addrIP.IsLinkLocalUnicast() {\n\t\t\tcontinue\n\t\t}\n\t\treturn addrIP.String(), nil\n\t}\n\treturn \"\", fmt.Errorf(\"failed to find usable address for interface\")\n}\n\nfunc getBindIP(ifname, address string) (string, error) 
{\n\tbindIP, _, err := addrParts(address)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"invalid BindAddr: %w\", err)\n\t}\n\n\t// Check if we have an interface\n\tif iface, _ := net.InterfaceByName(ifname); iface != nil {\n\t\taddrs, err := iface.Addrs()\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"failed to get interface addresses: %w\", err)\n\t\t}\n\t\tif len(addrs) == 0 {\n\t\t\treturn \"\", fmt.Errorf(\"interface '%s' has no addresses\", ifname)\n\t\t}\n\n\t\t// If there is no bind IP, pick an address\n\t\tif bindIP == \"0.0.0.0\" {\n\t\t\taddr, err := getBindIPFromNetworkInterface(addrs)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", fmt.Errorf(\"ip scan on %s: %w\", ifname, err)\n\t\t\t}\n\t\t\treturn addr, nil\n\t\t}\n\t\t// If there is a bind IP, ensure it is available\n\t\tfor _, a := range addrs {\n\t\t\taddr, ok := a.(*net.IPNet)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif addr.IP.String() == bindIP {\n\t\t\t\treturn bindIP, nil\n\t\t\t}\n\t\t}\n\t\treturn \"\", fmt.Errorf(\"interface '%s' has no '%s' address\", ifname, bindIP)\n\t}\n\tif bindIP == \"0.0.0.0\" {\n\t\t// if we're not bound to a specific IP, let's use a suitable private IP address.\n\t\tipStr, err := sockaddr.GetPrivateIP()\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"failed to get private interface addresses: %w\", err)\n\t\t}\n\n\t\t// if we could not find a private address, we need to expand our search to a public\n\t\t// ip address\n\t\tif ipStr == \"\" {\n\t\t\tipStr, err = sockaddr.GetPublicIP()\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", fmt.Errorf(\"failed to get public interface addresses: %w\", err)\n\t\t\t}\n\t\t}\n\n\t\tif ipStr == \"\" {\n\t\t\treturn \"\", fmt.Errorf(\"no private IP address found, and explicit IP not provided\")\n\t\t}\n\n\t\tparsed := net.ParseIP(ipStr)\n\t\tif parsed == nil {\n\t\t\treturn \"\", fmt.Errorf(\"failed to parse private IP address: %q\", ipStr)\n\t\t}\n\t\tbindIP = parsed.String()\n\t}\n\treturn bindIP, 
nil\n}\n\n// SetupNetworkConfig tries to find an appropriate bindIP to bind and propagate.\nfunc (c *Config) SetupNetworkConfig() (err error) {\n\taddress := net.JoinHostPort(c.BindAddr, strconv.Itoa(c.BindPort))\n\tc.BindAddr, err = getBindIP(c.Interface, address)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\taddress = net.JoinHostPort(c.MemberlistConfig.BindAddr, strconv.Itoa(c.MemberlistConfig.BindPort))\n\tc.MemberlistConfig.BindAddr, err = getBindIP(c.MemberlistInterface, address)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif c.MemberlistConfig.AdvertiseAddr != \"\" {\n\t\tadvertisePort := c.MemberlistConfig.AdvertisePort\n\t\tif advertisePort == 0 {\n\t\t\tadvertisePort = c.MemberlistConfig.BindPort\n\t\t}\n\t\taddress := net.JoinHostPort(c.MemberlistConfig.AdvertiseAddr, strconv.Itoa(advertisePort))\n\t\tadvertiseAddr, _, err := addrParts(address)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.MemberlistConfig.AdvertiseAddr = advertiseAddr\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "config/network_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestConfig_SetupNetworkConfig(t *testing.T) {\n\tc := &Config{}\n\trequire.NoError(t, c.Sanitize())\n\trequire.NoError(t, c.Validate())\n\n\trequire.NoError(t, c.SetupNetworkConfig())\n}\n\nfunc TestConfig_SetupNetworkConfig_Memberlist_AdvertiseAddr(t *testing.T) {\n\tc := &Config{}\n\trequire.NoError(t, c.Sanitize())\n\trequire.NoError(t, c.Validate())\n\tc.MemberlistConfig.AdvertiseAddr = \"localhost\"\n\trequire.NoError(t, c.SetupNetworkConfig())\n\trequire.NotEqual(t, \"localhost\", c.MemberlistConfig.AdvertiseAddr)\n}\n"
  },
  {
    "path": "docker/README.md",
    "content": "# Multi-container environment with Docker Compose\n\nWe provide a multi-container environment to test, develop and deploy Olric clusters. This environment includes nginx as \nTCP reverse proxy and Consul for service discovery. \n\n## Usage\n\nIn this folder, simply run:\n\n```\ndocker-compose up olric\n```\n\nTo create a multi-node cluster:\n\n```\ndocker-compose up --scale olric=10 olric\n```\n\nSample output:\n\n```\ndocker-compose up olric\nCreating docker_nginx_1  ... done\nCreating docker_consul_1 ... done\nCreating docker_olric_1  ... done\nCreating docker_olric_2  ... done\nAttaching to docker_olric_1\nolric_1      | 2020/08/12 15:53:18 [olric-server] pid: 1 has been started on 172.25.0.4:3320\nolric_1      | 2020/08/12 15:53:18 [INFO] Service discovery plugin is enabled, provider: consul\nolric_1      | 2020/08/12 15:53:18 [DEBUG] memberlist: Stream connection from=172.25.0.3:56830\nolric_1      | 2020/08/12 15:53:19 [ERROR] Join attempt returned error: no peers found => olric.go:2\n```\n\nYou can modify `olric-server-consul.yaml` file to try different configuration options. \n\nIf Consul service works without any problem, you can visit [http://localhost:8500](http://localhost:8500) to monitor \ncluster health.\n\n### Accessing to the cluster\n\n`nginx` service exposes port `3320` to access the cluster. 
You can list the cluster members with `CLUSTER.MEMBERS` command.\n\n```\n$ redis-cli -p 3320\n127.0.0.1:3320> CLUSTER.MEMBERS\n 1) 1) \"172.18.0.9:3320\"\n    2) (integer) 1745597203895069302\n    3) \"true\"\n 2) 1) \"172.18.0.10:3320\"\n    2) (integer) 1745597204061500052\n    3) \"false\"\n 3) 1) \"172.18.0.11:3320\"\n    2) (integer) 1745597204182767469\n    3) \"false\"\n 4) 1) \"172.18.0.3:3320\"\n    2) (integer) 1745597204275319219\n    3) \"false\"\n 5) 1) \"172.18.0.6:3320\"\n    2) (integer) 1745597204337977552\n    3) \"false\"\n 6) 1) \"172.18.0.4:3320\"\n    2) (integer) 1745597204369791844\n    3) \"false\"\n 7) 1) \"172.18.0.12:3320\"\n    2) (integer) 1745597204385693552\n    3) \"false\"\n 8) 1) \"172.18.0.7:3320\"\n    2) (integer) 1745597204523284927\n    3) \"false\"\n 9) 1) \"172.18.0.13:3320\"\n    2) (integer) 1745597204665281636\n    3) \"false\"\n10) 1) \"172.18.0.8:3320\"\n    2) (integer) 1745597208386416971\n    3) \"false\"\n```\n\nLet's taste the DMap:\n\n```\n$ redis-cli -p 3320\n127.0.0.1:3320> DM.PUT test my-key my-value\nOK\n127.0.0.1:3320> DM.GET test my-key\n\"my-value\"\n127.0.0.1:3320>\n```\n\n## Service discovery\n\nOlric provides a service discovery subsystem via a plugin interface. We currently have three service discovery plugins:\n\n* [olric-consul-plugin](https://github.com/olric-data/olric-consul-plugin): Consul-backed service discovery, \n* [olric-nats-plugin](https://github.com/justinfx/olric-nats-plugin): Nats-backed service discovery,\n* [olric-cloud-plugin](https://github.com/olric-data/olric-cloud-plugin): Service discovery plugin for cloud environments (AWS, GKE, Azure and Kubernetes)\n\nWe use the Consul plugin in this document:\n\n### Consul \n\nConsul is easy to use and a proven way to discover nodes in a clustered environment. Olric discover the other nodes via \n[olric-consul-plugin](https://github.com/olric-data/olric-consul-plugin). 
 Here is a simple payload for this setup:\n\n```json\n{\n  \"Name\": \"olric-cluster\",\n  \"Tags\": [\n    \"primary\",\n    \"v1\"\n  ],\n  \"Port\": 3322,\n  \"EnableTagOverride\": false,\n  \"Check\": {\n    \"Name\": \"Olric node on 3322\",\n    \"Interval\": \"1s\",\n    \"Timeout\": \"10s\"\n  }\n}\n```\n\n`3322` is used by [hashicorp/memberlist](https://github.com/hashicorp/memberlist) to maintain an eventually consistent view of the cluster. \nConsul dials this port to control the node. The `Address`, `ID` and `Check.TCP` fields are filled in by the plugin. You can still \ngive your own configuration values, if you know what you are doing.\n\nPlease check out `olric-server-consul.yaml` to see how to create an Olric cluster with Consul."
  },
  {
    "path": "docker/docker-compose.yml",
    "content": "services:\n  nginx:\n    image: nginx:latest\n    restart: on-failure\n    volumes:\n      - ${PWD}/nginx.conf:/etc/nginx/nginx.conf:ro\n    ports:\n      - '3320:3320'\n\n  consul:\n    platform: linux/amd64\n    image: public.ecr.aws/bitnami/consul:latest\n    volumes:\n      - consul_data:/bitnami/consul\n    ports:\n      - '8300:8300'\n      - '8301:8301'\n      - '8301:8301/udp'\n      - '8500:8500'\n      - '8600:8600'\n      - '8600:8600/udp'\n\n  olric:\n    platform: linux/amd64\n    image: ghcr.io/olric-data/olric-consul-plugin:latest\n    restart: on-failure\n    volumes:\n      - ${PWD}/olric-server-consul.yaml:/etc/olric-server.yaml:ro\n    depends_on:\n      - nginx\n      - consul\n\nvolumes:\n  consul_data:\n    driver: local"
  },
  {
    "path": "docker/nginx.conf",
    "content": "user  nginx;\n\nevents {\n    worker_connections   1000;\n}\n\nstream {\n    server {\n       listen 3320;\n       proxy_pass olric:3320;\n    }\n}\n"
  },
  {
    "path": "docker/olric-server-consul.yaml",
    "content": "server:\n  # BindAddr denotes the address that Olric will bind to for communication\n  # with other Olric nodes.\n  bindAddr: 0.0.0.0\n\n  # BindPort denotes the address that Olric will bind to for communication\n  # with other Olric nodes.\n  bindPort: 3320\n\n  # KeepAlivePeriod denotes whether the operating system should send\n  # keep-alive messages on the connection.\n  keepAlivePeriod: 300s\n\n  # IdleClose will automatically close idle connections after the specified duration.\n  # Use zero to disable this feature.\n  # idleClose: 300s\n\n  # Timeout for bootstrap control\n  #\n  # An Olric node checks operation status before taking any action for the\n  # cluster events, responding incoming requests and running API functions.\n  # Bootstrapping status is one of the most important checkpoints for an\n  # \"operable\" Olric node. BootstrapTimeout sets a deadline to check\n  # bootstrapping status without blocking indefinitely.\n  bootstrapTimeout: 5s\n\n  # PartitionCount is 271, by default.\n  partitionCount: 271\n\n  # ReplicaCount is 1, by default.\n  replicaCount: 1\n\n  # Minimum number of successful writes to return a response for a write request.\n  writeQuorum: 1\n\n  # Minimum number of successful reads to return a response for a read request.\n  readQuorum: 1\n\n  # Switch to control read-repair algorithm which helps to reduce entropy.\n  readRepair: false\n\n  # Default value is SyncReplicationMode.\n  replicationMode: 0 # sync mode. for async, set 1\n\n  # Minimum number of members to form a cluster and run any query on the cluster.\n  memberCountQuorum: 1\n\n  # Coordinator member pushes the routing table to cluster members in the case of\n  # node join or left events. It also pushes the table periodically. routingTablePushInterval\n  # is the interval between subsequent calls. Default is 1 minute.\n  routingTablePushInterval: 1m\n\n  # Olric can send push cluster events to cluster.events channel. 
Available cluster events:\n  #\n  # * node-join-event\n  # * node-left-event\n  # * fragment-migration-event\n  # * fragment-received-event\n  #\n  # If you want to receive these events, set true to EnableClusterEventsChannel and subscribe to\n  # cluster.events channel. Default is false.\n  enableClusterEventsChannel: true\n\nclient:\n  # Timeout for TCP dial.\n  #\n  # The timeout includes name resolution, if required. When using TCP, and the host in the address parameter\n  # resolves to multiple IP addresses, the timeout is spread over each consecutive dial, such that each is\n  # given an appropriate fraction of the time to connect.\n  dialTimeout: 5s\n\n  # Timeout for socket reads. If reached, commands will fail\n  # with a timeout instead of blocking. Use value -1 for no timeout and 0 for default.\n  # Default is DefaultReadTimeout\n  readTimeout: 3s\n\n  # Timeout for socket writes. If reached, commands will fail\n  # with a timeout instead of blocking.\n  # Default is DefaultWriteTimeout\n  writeTimeout: 3s\n\n  # Maximum number of retries before giving up.\n  # Default is 3 retries; -1 (not 0) disables retries.\n  #maxRetries: 3\n\n  # Minimum backoff between each retry.\n  # Default is 8 milliseconds; -1 disables backoff.\n  #minRetryBackoff: 8ms\n\n  # Maximum backoff between each retry.\n  # Default is 512 milliseconds; -1 disables backoff.\n  #maxRetryBackoff: 512ms\n\n  # Type of connection pool.\n  # true for FIFO pool, false for LIFO pool.\n  # Note that fifo has higher overhead compared to lifo.\n  #poolFIFO: false\n\n  # Maximum number of socket connections.\n  # Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS.\n  #poolSize: 0\n\n  # Minimum number of idle connections which is useful when establishing\n  # new connection is slow.\n  #minIdleConns:\n\n  # Connection age at which client retires (closes) the connection.\n  # Default is to not close aged connections.\n  #maxConnAge:\n\n  # Amount of time client 
waits for connection if all connections are busy before\n  # returning an error. Default is ReadTimeout + 1 second.\n  #poolTimeout: 3s\n\n  # Amount of time after which client closes idle connections.\n  # Should be less than server's timeout.\n  # Default is 5 minutes. -1 disables idle timeout check.\n  idleTimeout: 5m\n\n  # Frequency of idle checks made by idle connections reaper.\n  # Default is 1 minute. -1 disables idle connections reaper,\n  # but idle connections are still discarded by the client\n  # if IdleTimeout is set.\n  idleCheckFrequency: 1m\n\n\nlogging:\n  # DefaultLogVerbosity denotes default log verbosity level.\n  #\n  # * 1 - Generally useful for this to ALWAYS be visible to an operator\n  #   * Programmer errors\n  #   * Logging extra info about a panic\n  #   * CLI argument handling\n  # * 2 - A reasonable default log level if you don't want verbosity.\n  #   * Information about config (listening on X, watching Y)\n  #   * Errors that repeat frequently that relate to conditions that can be\n  #     corrected\n  # * 3 - Useful steady state information about the service and\n  #     important log messages that may correlate to\n  #   significant changes in the system.  This is the recommended default log\n  #     level for most systems.\n  #   * Logging HTTP requests and their exit code\n  #   * System state changing\n  #   * Controller state change events\n  #   * Scheduler log messages\n  # * 4 - Extended information about changes\n  #   * More info about system state changes\n  # * 5 - Debug level verbosity\n  #   * Logging in particularly thorny parts of code where you may want to come\n  #     back later and check it\n  # * 6 - Trace level verbosity\n  #   * Context to understand the steps leading up to neterrors and warnings\n  #   * More information for troubleshooting reported issues\n  verbosity: 3\n\n  # Default LogLevel is DEBUG. 
Available levels: \"DEBUG\", \"WARN\", \"ERROR\", \"INFO\"\n  level: WARN\n  output: stderr\n\nmemberlist:\n  environment: lan\n\n  # Configuration related to what address to bind to and ports to\n  # listen on. The port is used for both UDP and TCP gossip. It is\n  # assumed other nodes are running on this port, but they do not need\n  # to.\n  bindAddr: 0.0.0.0\n  bindPort: 3322\n\n  # EnableCompression is used to control message compression. This can\n  # be used to reduce bandwidth usage at the cost of slightly more CPU\n  # utilization. This is only available starting at protocol version 1.\n  enableCompression: false\n\n  # JoinRetryInterval is the time gap between attempts to join an existing\n  # cluster.\n  joinRetryInterval: 1ms\n\n  # MaxJoinAttempts denotes the maximum number of attemps to join an existing\n  # cluster before forming a new one.\n  maxJoinAttempts: 1\n\n  # See service discovery plugins\n  #peers:\n  #  - \"localhost:3325\"\n\n  #advertiseAddr: \"\"\n  #advertisePort: 3322\n  #suspicionMaxTimeoutMult: 6\n  #disableTCPPings: false\n  #awarenessMaxMultiplier: 8\n  #gossipNodes: 3\n  #gossipVerifyIncoming: true\n  #gossipVerifyOutgoing: true\n  #dnsConfigPath: \"/etc/resolv.conf\"\n  #handoffQueueDepth: 1024\n  #udpBufferSize: 1400\n\ndmaps:\n  engine:\n    name: ramblock\n    config:\n      tableSize: 524288 # bytes\n#  checkEmptyFragmentsInterval: 1m\n#  triggerCompactionInterval: 10m\n#  numEvictionWorkers: 1\n#  maxIdleDuration: \"\"\n#  ttlDuration: \"100s\"\n#  maxKeys: 100000\n#  maxInuse: 1000000\n#  lRUSamples: 10\n#  evictionPolicy: \"LRU\"\n#  custom:\n#   foobar:\n#      maxIdleDuration: \"60s\"\n#      ttlDuration: \"300s\"\n#      maxKeys: 500000\n#      lRUSamples: 20\n#      evictionPolicy: \"NONE\"\n\nserviceDiscovery:\n  # path is a required property and used by Olric. 
It has to be a full path.\n  path: \"/usr/lib/olric-consul-plugin.so\"\n\n  # provider is just informational.\n  provider: \"consul\"\n\n  # Plugin specific configuration\n  # Consul server, used by the plugin. It's required.\n  address: \"http://consul:8500\"\n\n  # Specifies that the server should return only nodes with all checks in the passing state.\n  passingOnly: true\n\n  # Missing health checks from the request will be deleted from the agent. Using this parameter\n  # allows you to idempotently register a service and its checks without having to manually deregister\n  # checks.\n  replaceExistingChecks: true\n\n  # InsecureSkipVerify controls whether a client verifies the\n  # server's certificate chain and host name.\n  # If InsecureSkipVerify is true, TLS accepts any certificate\n  # presented by the server and any host name in that certificate.\n  # In this mode, TLS is susceptible to man-in-the-middle attacks.\n  # This should be used only for testing.\n  insecureSkipVerify: true\n\n  # service record\n  payload: '\n      {\n          \"Name\": \"olric-cluster\",\n          \"Tags\": [\n            \"primary\",\n            \"v1\"\n          ],\n          \"Port\": 3322,\n          \"EnableTagOverride\": false,\n          \"Check\": {\n            \"Name\": \"Olric node on 3322\",\n            \"Interval\": \"1s\",\n            \"Timeout\": \"10s\"\n          }\n      }\n'"
  },
  {
    "path": "embedded_client.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage olric\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/discovery\"\n\t\"github.com/olric-data/olric/internal/dmap\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/olric-data/olric/internal/util\"\n\t\"github.com/olric-data/olric/stats\"\n)\n\n// EmbeddedLockContext is returned by Lock and LockWithTimeout methods.\n// It should be stored in a proper way to release the lock.\ntype EmbeddedLockContext struct {\n\tkey   string\n\ttoken []byte\n\tdm    *EmbeddedDMap\n}\n\n// Unlock releases the lock.\nfunc (l *EmbeddedLockContext) Unlock(ctx context.Context) error {\n\terr := l.dm.dm.Unlock(ctx, l.key, l.token)\n\treturn convertDMapError(err)\n}\n\n// Lease takes the duration to update the expiry for the given Lock.\nfunc (l *EmbeddedLockContext) Lease(ctx context.Context, duration time.Duration) error {\n\terr := l.dm.dm.Lease(ctx, l.key, l.token, duration)\n\treturn convertDMapError(err)\n}\n\n// EmbeddedClient is an Olric client implementation for embedded-member scenario.\ntype EmbeddedClient struct {\n\tdb *Olric\n}\n\n// EmbeddedDMap is an DMap client implementation for embedded-member scenario.\ntype EmbeddedDMap struct {\n\tmtx           sync.RWMutex\n\tclusterClient *ClusterClient\n\tconfig        *dmapConfig\n\tmember        
discovery.Member\n\tdm            *dmap.DMap\n\tclient        *EmbeddedClient\n\tname          string\n}\n\nfunc (dm *EmbeddedDMap) setOrGetClusterClient() (Client, error) {\n\t// Acquire the read lock and try to access the cluster client, if any.\n\tdm.mtx.RLock()\n\tif dm.clusterClient != nil {\n\t\tdm.mtx.RUnlock()\n\t\treturn dm.clusterClient, nil\n\t}\n\tdm.mtx.RUnlock()\n\n\t// The cluster client is unset, try to create a new one.\n\tdm.mtx.Lock()\n\tdefer dm.mtx.Unlock()\n\n\t// Check the existing value last time. There can be another running instances\n\t// of this function.\n\tif dm.clusterClient != nil {\n\t\treturn dm.clusterClient, nil\n\t}\n\n\t// Create a new cluster client here.\n\tc, err := NewClusterClient([]string{dm.client.db.rt.This().String()})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdm.clusterClient = c\n\n\treturn dm.clusterClient, nil\n}\n\n// Pipeline is a mechanism to realise Redis Pipeline technique.\n//\n// Pipelining is a technique to extremely speed up processing by packing\n// operations to batches, send them at once to Redis and read a replies in a\n// singe step.\n// See https://redis.io/topics/pipelining\n//\n// Pay attention, that Pipeline is not a transaction, so you can get unexpected\n// results in case of big pipelines and small read/write timeouts.\n// Redis client has retransmission logic in case of timeouts, pipeline\n// can be retransmitted and commands can be executed more than once.\nfunc (dm *EmbeddedDMap) Pipeline(opts ...PipelineOption) (*DMapPipeline, error) {\n\tcc, err := dm.setOrGetClusterClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclusterDMap, err := cc.NewDMap(dm.name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn clusterDMap.Pipeline(opts...)\n}\n\n// RefreshMetadata fetches a list of available members and the latest routing\n// table version. It also closes stale clients, if there are any. EmbeddedClient has\n// this method to implement the Client interface. 
It doesn't need to refresh metadata manually.\nfunc (e *EmbeddedClient) RefreshMetadata(_ context.Context) error {\n\t// EmbeddedClient already has the latest metadata.\n\treturn nil\n}\n\n// Scan returns an iterator to loop over the keys.\n//\n// Available scan options:\n//\n// * Count\n// * Match\nfunc (dm *EmbeddedDMap) Scan(ctx context.Context, options ...ScanOption) (Iterator, error) {\n\tcc, err := dm.setOrGetClusterClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcdm, err := cc.NewDMap(dm.name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ti, err := cdm.Scan(ctx, options...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\te := &EmbeddedIterator{\n\t\tclient: dm.client,\n\t\tdm:     dm.dm,\n\t}\n\n\tclusterIterator := i.(*ClusterIterator)\n\tclusterIterator.scanner = e.scanOnOwners\n\te.clusterIterator = clusterIterator\n\treturn e, nil\n}\n\n// Lock sets a lock for the given key. Acquired lock is only for the key in\n// this dmap.\n//\n// It returns immediately if it acquires the lock for the given key. Otherwise,\n// it waits until deadline.\n//\n// You should know that the locks are approximate, and only to be used for\n// non-critical purposes.\nfunc (dm *EmbeddedDMap) Lock(ctx context.Context, key string, deadline time.Duration) (LockContext, error) {\n\ttoken, err := dm.dm.Lock(ctx, key, 0*time.Second, deadline)\n\tif err != nil {\n\t\treturn nil, convertDMapError(err)\n\t}\n\treturn &EmbeddedLockContext{\n\t\tkey:   key,\n\t\ttoken: token,\n\t\tdm:    dm,\n\t}, nil\n}\n\n// LockWithTimeout sets a lock for the given key. If the lock is still unreleased\n// the end of given period of time,\n// it automatically releases the lock. Acquired lock is only for the key in\n// this dmap.\n//\n// It returns immediately if it acquires the lock for the given key. 
Otherwise,\n// it waits until deadline.\n//\n// You should know that the locks are approximate, and only to be used for\n// non-critical purposes.\nfunc (dm *EmbeddedDMap) LockWithTimeout(ctx context.Context, key string, timeout, deadline time.Duration) (LockContext, error) {\n\ttoken, err := dm.dm.Lock(ctx, key, timeout, deadline)\n\tif err != nil {\n\t\treturn nil, convertDMapError(err)\n\t}\n\treturn &EmbeddedLockContext{\n\t\tkey:   key,\n\t\ttoken: token,\n\t\tdm:    dm,\n\t}, nil\n}\n\n// Destroy flushes the given DMap on the cluster. You should know that there\n// is no global lock on DMaps. So if you call Put/PutEx and Destroy methods\n// concurrently on the cluster, Put call may set new values to the DMap.\nfunc (dm *EmbeddedDMap) Destroy(ctx context.Context) error {\n\treturn dm.dm.Destroy(ctx)\n}\n\n// Expire updates the expiry for the given key. It returns ErrKeyNotFound if\n// the DB does not contain the key. It's thread-safe.\nfunc (dm *EmbeddedDMap) Expire(ctx context.Context, key string, timeout time.Duration) error {\n\treturn dm.dm.Expire(ctx, key, timeout)\n}\n\n// Name exposes name of the DMap.\nfunc (dm *EmbeddedDMap) Name() string {\n\treturn dm.name\n}\n\n// GetPut atomically sets the key to value and returns the old value stored at key. It returns nil if there is no\n// previous value.\nfunc (dm *EmbeddedDMap) GetPut(ctx context.Context, key string, value interface{}) (*GetResponse, error) {\n\te, err := dm.dm.GetPut(ctx, key, value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &GetResponse{\n\t\tentry: e,\n\t}, nil\n}\n\n// Decr atomically decrements the key by delta. The return value is the new value\n// after being decremented or an error.\nfunc (dm *EmbeddedDMap) Decr(ctx context.Context, key string, delta int) (int, error) {\n\treturn dm.dm.Decr(ctx, key, delta)\n}\n\n// Incr atomically increments the key by delta. 
The return value is the new value\n// after being incremented or an error.\nfunc (dm *EmbeddedDMap) Incr(ctx context.Context, key string, delta int) (int, error) {\n\treturn dm.dm.Incr(ctx, key, delta)\n}\n\n// IncrByFloat atomically increments the key by delta. The return value is the new value after being incremented or an error.\nfunc (dm *EmbeddedDMap) IncrByFloat(ctx context.Context, key string, delta float64) (float64, error) {\n\treturn dm.dm.IncrByFloat(ctx, key, delta)\n}\n\n// Delete deletes values for the given keys. Delete will not return error\n// if key doesn't exist. It's thread-safe. It is safe to modify the contents\n// of the argument after Delete returns.\nfunc (dm *EmbeddedDMap) Delete(ctx context.Context, keys ...string) (int, error) {\n\treturn dm.dm.Delete(ctx, keys...)\n}\n\n// Get gets the value for the given key. It returns ErrKeyNotFound if the DB\n// does not contain the key. It's thread-safe. It is safe to modify the contents\n// of the returned value. See GetResponse for the details.\nfunc (dm *EmbeddedDMap) Get(ctx context.Context, key string) (*GetResponse, error) {\n\tresult, err := dm.dm.Get(ctx, key)\n\tif err != nil {\n\t\treturn nil, convertDMapError(err)\n\t}\n\n\treturn &GetResponse{\n\t\tentry: result,\n\t}, nil\n}\n\n// Put sets the value for the given key. It overwrites any previous value for\n// that key, and it's thread-safe. The key has to be a string. 
value type is arbitrary.\n// It is safe to modify the contents of the arguments after Put returns but not before.\nfunc (dm *EmbeddedDMap) Put(ctx context.Context, key string, value interface{}, options ...PutOption) error {\n\tvar pc dmap.PutConfig\n\tfor _, opt := range options {\n\t\topt(&pc)\n\t}\n\terr := dm.dm.Put(ctx, key, value, &pc)\n\tif err != nil {\n\t\treturn convertDMapError(err)\n\t}\n\treturn nil\n}\n\n// Close stops background routines and frees allocated resources.\nfunc (dm *EmbeddedDMap) Close(ctx context.Context) error {\n\tdm.mtx.RLock()\n\tclusterClient := dm.clusterClient\n\tdm.mtx.RUnlock()\n\n\tif clusterClient != nil {\n\t\treturn dm.clusterClient.Close(ctx)\n\t}\n\treturn nil\n}\n\nfunc (e *EmbeddedClient) NewDMap(name string, options ...DMapOption) (DMap, error) {\n\tdm, err := e.db.dmap.NewDMap(name)\n\tif err != nil {\n\t\treturn nil, convertDMapError(err)\n\t}\n\n\tvar dc dmapConfig\n\tfor _, opt := range options {\n\t\topt(&dc)\n\t}\n\n\treturn &EmbeddedDMap{\n\t\tconfig: &dc,\n\t\tdm:     dm,\n\t\tname:   name,\n\t\tclient: e,\n\t\tmember: e.db.rt.This(),\n\t}, nil\n}\n\n// Stats exposes some useful metrics to monitor an Olric node.\nfunc (e *EmbeddedClient) Stats(ctx context.Context, address string, options ...StatsOption) (stats.Stats, error) {\n\tif err := e.db.isOperable(); err != nil {\n\t\t// this node is not bootstrapped yet.\n\t\treturn stats.Stats{}, err\n\t}\n\tvar cfg statsConfig\n\tfor _, opt := range options {\n\t\topt(&cfg)\n\t}\n\n\tif address == e.db.rt.This().String() {\n\t\treturn e.db.stats(cfg), nil\n\t}\n\n\tstatsCmd := protocol.NewStats()\n\tif cfg.CollectRuntime {\n\t\tstatsCmd.SetCollectRuntime()\n\t}\n\tcmd := statsCmd.Command(ctx)\n\trc := e.db.client.Get(address)\n\terr := rc.Process(ctx, cmd)\n\tif err != nil {\n\t\treturn stats.Stats{}, processProtocolError(err)\n\t}\n\n\tif err = cmd.Err(); err != nil {\n\t\treturn stats.Stats{}, processProtocolError(err)\n\t}\n\tdata, err := cmd.Bytes()\n\tif err != 
nil {\n\t\treturn stats.Stats{}, processProtocolError(err)\n\t}\n\tvar s stats.Stats\n\terr = json.Unmarshal(data, &s)\n\tif err != nil {\n\t\treturn stats.Stats{}, processProtocolError(err)\n\t}\n\treturn s, nil\n}\n\n// Close stops background routines and frees allocated resources.\nfunc (e *EmbeddedClient) Close(_ context.Context) error {\n\treturn nil\n}\n\n// Ping sends a ping message to an Olric node. Returns PONG if message is empty,\n// otherwise return a copy of the message as a bulk. This command is often used to test\n// if a connection is still alive, or to measure latency.\nfunc (e *EmbeddedClient) Ping(ctx context.Context, addr, message string) (string, error) {\n\tresponse, err := e.db.ping(ctx, addr, message)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn util.BytesToString(response), nil\n}\n\n// RoutingTable returns the latest version of the routing table.\nfunc (e *EmbeddedClient) RoutingTable(ctx context.Context) (RoutingTable, error) {\n\treturn e.db.routingTable(ctx)\n}\n\n// Members returns a thread-safe list of cluster members.\nfunc (e *EmbeddedClient) Members(_ context.Context) ([]Member, error) {\n\tmembers := e.db.rt.Discovery().GetMembers()\n\tcoordinator := e.db.rt.Discovery().GetCoordinator()\n\tvar result []Member\n\tfor _, member := range members {\n\t\tm := Member{\n\t\t\tName:      member.Name,\n\t\t\tID:        member.ID,\n\t\t\tBirthdate: member.Birthdate,\n\t\t}\n\t\tif coordinator.ID == member.ID {\n\t\t\tm.Coordinator = true\n\t\t}\n\t\tresult = append(result, m)\n\t}\n\treturn result, nil\n}\n\n// NewPubSub returns a new PubSub client with the given options.\nfunc (e *EmbeddedClient) NewPubSub(options ...PubSubOption) (*PubSub, error) {\n\treturn newPubSub(e.db.client, options...)\n}\n\n// NewEmbeddedClient creates and returns a new EmbeddedClient instance.\nfunc (db *Olric) NewEmbeddedClient() *EmbeddedClient {\n\treturn &EmbeddedClient{db: db}\n}\n\nvar (\n\t_ Client = (*EmbeddedClient)(nil)\n\t_ DMap   = 
(*EmbeddedDMap)(nil)\n)\n"
  },
  {
    "path": "embedded_client_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage olric\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"runtime\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/testutil\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"golang.org/x/sync/errgroup\"\n)\n\nfunc TestEmbeddedClient_NewDMap(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\t_, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n}\n\nfunc TestEmbeddedClient_DMap_Put(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\terr = dm.Put(context.Background(), \"mykey\", \"myvalue\")\n\trequire.NoError(t, err)\n}\n\nfunc TestEmbeddedClient_DMap_Put_EX(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\te := db.NewEmbeddedClient()\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\terr = dm.Put(ctx, \"mykey\", \"myvalue\", EX(time.Second))\n\trequire.NoError(t, err)\n\n\t<-time.After(time.Second)\n\n\t_, err = dm.Get(ctx, \"mykey\")\n\trequire.ErrorIs(t, err, ErrKeyNotFound)\n}\n\nfunc TestEmbeddedClient_DMap_Put_PX(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := 
cluster.addMember(t)\n\n\tctx := context.Background()\n\te := db.NewEmbeddedClient()\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\terr = dm.Put(ctx, \"mykey\", \"myvalue\", PX(time.Millisecond))\n\trequire.NoError(t, err)\n\n\t<-time.After(time.Millisecond)\n\n\t_, err = dm.Get(ctx, \"mykey\")\n\trequire.ErrorIs(t, err, ErrKeyNotFound)\n}\n\nfunc TestEmbeddedClient_DMap_Put_EXAT(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\te := db.NewEmbeddedClient()\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\terr = dm.Put(ctx, \"mykey\", \"myvalue\", EXAT(time.Duration(time.Now().Add(time.Second).UnixNano())))\n\trequire.NoError(t, err)\n\n\t<-time.After(time.Second)\n\n\t_, err = dm.Get(ctx, \"mykey\")\n\trequire.ErrorIs(t, err, ErrKeyNotFound)\n}\n\nfunc TestEmbeddedClient_DMap_Put_PXAT(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\te := db.NewEmbeddedClient()\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\terr = dm.Put(ctx, \"mykey\", \"myvalue\", PXAT(time.Duration(time.Now().Add(time.Millisecond).UnixNano())))\n\trequire.NoError(t, err)\n\n\t<-time.After(time.Millisecond)\n\n\t_, err = dm.Get(ctx, \"mykey\")\n\trequire.ErrorIs(t, err, ErrKeyNotFound)\n}\n\nfunc TestEmbeddedClient_DMap_Put_NX(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\te := db.NewEmbeddedClient()\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\terr = dm.Put(ctx, \"mykey\", \"myvalue\", NX())\n\trequire.NoError(t, err)\n\n\t<-time.After(time.Millisecond)\n\n\t_, err = dm.Get(ctx, \"mykey\")\n\trequire.NoError(t, err)\n}\n\nfunc TestEmbeddedClient_DMap_Put_XX(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\te := db.NewEmbeddedClient()\n\tdm, 
err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\terr = dm.Put(ctx, \"mykey\", \"myvalue\", XX())\n\trequire.ErrorIs(t, err, ErrKeyNotFound)\n}\n\nfunc TestEmbeddedClient_DMap_Get(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\terr = dm.Put(context.Background(), \"mykey\", \"myvalue\")\n\trequire.NoError(t, err)\n\n\tgr, err := dm.Get(context.Background(), \"mykey\")\n\trequire.NoError(t, err)\n\n\tvalue, err := gr.String()\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"myvalue\", value)\n}\n\nfunc TestEmbeddedClient_DMap_Delete(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\terr = dm.Put(context.Background(), \"mykey\", \"myvalue\")\n\trequire.NoError(t, err)\n\n\tcount, err := dm.Delete(context.Background(), \"mykey\")\n\trequire.NoError(t, err)\n\trequire.Equal(t, 1, count)\n\n\t_, err = dm.Get(context.Background(), \"mykey\")\n\trequire.ErrorIs(t, err, ErrKeyNotFound)\n}\n\nfunc TestEmbeddedClient_DMap_Delete_Many_Keys(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tvar keys []string\n\tfor i := 0; i < 10; i++ {\n\t\tkey := testutil.ToKey(i)\n\t\terr = dm.Put(context.Background(), key, \"myvalue\")\n\t\trequire.NoError(t, err)\n\t\tkeys = append(keys, key)\n\t}\n\n\tcount, err := dm.Delete(context.Background(), keys...)\n\trequire.NoError(t, err)\n\trequire.Equal(t, 10, count)\n}\n\nfunc TestEmbeddedClient_DMap_Atomic_Incr(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tvar errGr 
errgroup.Group\n\tfor i := 0; i < 100; i++ {\n\t\terrGr.Go(func() error {\n\t\t\t_, err = dm.Incr(ctx, \"mykey\", 1)\n\t\t\treturn err\n\t\t})\n\t}\n\trequire.NoError(t, errGr.Wait())\n\n\tgr, err := dm.Get(context.Background(), \"mykey\")\n\tres, err := gr.Int()\n\trequire.NoError(t, err)\n\trequire.Equal(t, 100, res)\n}\n\nfunc TestEmbeddedClient_DMap_Atomic_Decr(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\terr = dm.Put(ctx, \"mykey\", 100)\n\trequire.NoError(t, err)\n\n\tvar errGr errgroup.Group\n\tfor i := 0; i < 100; i++ {\n\t\terrGr.Go(func() error {\n\t\t\t_, err = dm.Decr(ctx, \"mykey\", 1)\n\t\t\treturn err\n\t\t})\n\t}\n\trequire.NoError(t, errGr.Wait())\n\n\tgr, err := dm.Get(context.Background(), \"mykey\")\n\tres, err := gr.Int()\n\trequire.NoError(t, err)\n\trequire.Equal(t, 0, res)\n}\n\nfunc TestEmbeddedClient_DMap_GetPut(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tgr, err := dm.GetPut(context.Background(), \"mykey\", \"myvalue\")\n\trequire.NoError(t, err)\n\n\t_, err = gr.String()\n\trequire.ErrorIs(t, err, ErrNilResponse)\n\n\tgr, err = dm.GetPut(context.Background(), \"mykey\", \"myvalue-2\")\n\trequire.NoError(t, err)\n\n\tvalue, err := gr.String()\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"myvalue\", value)\n}\n\nfunc TestEmbeddedClient_DMap_Atomic_IncrByFloat(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tvar errGr errgroup.Group\n\tfor i := 0; i < 100; i++ {\n\t\terrGr.Go(func() error {\n\t\t\t_, err = dm.IncrByFloat(ctx, \"mykey\", 1.2)\n\t\t\treturn 
err\n\t\t})\n\t}\n\trequire.NoError(t, errGr.Wait())\n\n\tgr, err := dm.Get(context.Background(), \"mykey\")\n\tres, err := gr.Float64()\n\trequire.NoError(t, err)\n\trequire.Equal(t, 120.0000000000002, res)\n}\n\nfunc TestEmbeddedClient_DMap_Expire(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\terr = dm.Put(ctx, \"mykey\", \"myvalue\")\n\trequire.NoError(t, err)\n\n\terr = dm.Expire(ctx, \"mykey\", time.Millisecond)\n\trequire.NoError(t, err)\n\n\t<-time.After(2 * time.Millisecond)\n\n\t_, err = dm.Get(context.Background(), \"mykey\")\n\trequire.ErrorIs(t, err, ErrKeyNotFound)\n}\n\nfunc TestEmbeddedClient_DMap_Destroy(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tfor i := 0; i < 100; i++ {\n\t\terr = dm.Put(ctx, fmt.Sprintf(\"mykey-%d\", i), \"myvalue\")\n\t\trequire.NoError(t, err)\n\t}\n\n\terr = dm.Destroy(ctx)\n\trequire.NoError(t, err)\n\n\t// Destroy is an async command. 
Wait for some time to see its effect.\n\t<-time.After(100 * time.Millisecond)\n\n\tstats, err := e.Stats(ctx, e.db.rt.This().String())\n\trequire.NoError(t, err)\n\tvar total int\n\tfor _, part := range stats.Partitions {\n\t\ttotal += part.Length\n\t}\n\trequire.Greater(t, 100, total)\n}\n\nfunc TestEmbeddedClient_DMap_Lock(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tkey := \"lock.key.test\"\n\n\tlx, err := dm.Lock(ctx, key, time.Second)\n\trequire.NoError(t, err)\n\n\terr = lx.Unlock(ctx)\n\trequire.NoError(t, err)\n}\n\nfunc TestEmbeddedClient_DMap_Lock_ErrLockNotAcquired(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tkey := \"lock.key.test\"\n\n\t_, err = dm.Lock(ctx, key, time.Second)\n\trequire.NoError(t, err)\n\n\t_, err = dm.Lock(ctx, key, time.Millisecond)\n\trequire.ErrorIs(t, err, ErrLockNotAcquired)\n}\n\nfunc TestEmbeddedClient_DMap_Lock_ErrNoSuchLock(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tkey := \"lock.key.test\"\n\n\tlx, err := dm.Lock(ctx, key, time.Second)\n\trequire.NoError(t, err)\n\n\terr = lx.Unlock(ctx)\n\trequire.NoError(t, err)\n\n\terr = lx.Unlock(ctx)\n\trequire.ErrorIs(t, err, ErrNoSuchLock)\n}\n\nfunc TestEmbeddedClient_DMap_LockWithTimeout(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tkey := \"lock.key.test\"\n\n\tlx, err := dm.LockWithTimeout(ctx, key, 
5*time.Second, time.Second)\n\trequire.NoError(t, err)\n\n\terr = lx.Unlock(ctx)\n\trequire.NoError(t, err)\n}\n\nfunc TestEmbeddedClient_DMap_LockWithTimeout_Timeout(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tkey := \"lock.key.test\"\n\n\tlx, err := dm.LockWithTimeout(ctx, key, time.Millisecond, time.Second)\n\trequire.NoError(t, err)\n\n\t<-time.After(2 * time.Millisecond)\n\n\terr = lx.Unlock(ctx)\n\trequire.ErrorIs(t, err, ErrNoSuchLock)\n}\n\nfunc TestEmbeddedClient_DMap_LockWithTimeout_ErrLockNotAcquired(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tkey := \"lock.key.test\"\n\n\t_, err = dm.LockWithTimeout(ctx, key, 10*time.Second, time.Second)\n\trequire.NoError(t, err)\n\n\t_, err = dm.LockWithTimeout(ctx, key, 10*time.Second, time.Millisecond)\n\trequire.ErrorIs(t, err, ErrLockNotAcquired)\n}\n\nfunc TestEmbeddedClient_DMap_LockWithTimeout_ErrNoSuchLock(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tkey := \"lock.key.test\"\n\n\tlx, err := dm.LockWithTimeout(ctx, key, time.Second, time.Second)\n\trequire.NoError(t, err)\n\n\terr = lx.Unlock(ctx)\n\trequire.NoError(t, err)\n\n\terr = lx.Unlock(ctx)\n\trequire.ErrorIs(t, err, ErrNoSuchLock)\n}\n\nfunc TestEmbeddedClient_DMap_LockWithTimeout_ErrNoSuchLock_Timeout(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tkey := \"lock.key.test\"\n\n\tlx, 
err := dm.LockWithTimeout(ctx, key, time.Millisecond, time.Second)\n\trequire.NoError(t, err)\n\n\t<-time.After(time.Millisecond)\n\n\terr = lx.Unlock(ctx)\n\trequire.ErrorIs(t, err, ErrNoSuchLock)\n}\n\nfunc TestEmbeddedClient_DMap_LockWithTimeout_Then_Lease(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tkey := \"lock.key.test\"\n\n\tlx, err := dm.LockWithTimeout(ctx, key, 50*time.Millisecond, time.Second)\n\trequire.NoError(t, err)\n\n\t// Expand its timeout value\n\terr = lx.Lease(ctx, time.Hour)\n\trequire.NoError(t, err)\n\n\t<-time.After(100 * time.Millisecond)\n\n\t_, err = dm.Lock(ctx, key, time.Millisecond)\n\trequire.ErrorIs(t, err, ErrLockNotAcquired)\n}\n\nfunc TestEmbeddedClient_RoutingTable_Standalone(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\trt, err := e.RoutingTable(context.Background())\n\trequire.NoError(t, err)\n\trequire.Len(t, rt, int(db.config.PartitionCount))\n\tfor _, route := range rt {\n\t\trequire.Len(t, route.PrimaryOwners, 1)\n\t\trequire.Equal(t, db.rt.This().String(), route.PrimaryOwners[0])\n\t\trequire.Len(t, route.ReplicaOwners, 0)\n\t}\n}\n\nfunc TestEmbeddedClient_RoutingTable_Cluster(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\n\tcluster.addMember(t) // Cluster coordinator\n\t<-time.After(250 * time.Millisecond)\n\n\tcluster.addMember(t)\n\tdb2 := cluster.addMember(t)\n\n\te := db2.NewEmbeddedClient()\n\trt, err := e.RoutingTable(context.Background())\n\trequire.NoError(t, err)\n\trequire.Len(t, rt, int(db2.config.PartitionCount))\n\towners := make(map[string]struct{})\n\tfor _, route := range rt {\n\t\tfor _, owner := range route.PrimaryOwners {\n\t\t\towners[owner] = struct{}{}\n\t\t}\n\t}\n\trequire.Len(t, owners, 3)\n}\n\nfunc TestEmbeddedClient_Member(t *testing.T) 
{\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\tcluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\tmembers, err := e.Members(context.Background())\n\trequire.NoError(t, err)\n\trequire.Len(t, members, 2)\n\tcoordinator := db.rt.Discovery().GetCoordinator()\n\tfor _, member := range members {\n\t\trequire.NotEqual(t, \"\", member.Name)\n\t\trequire.NotEqual(t, 0, member.ID)\n\t\trequire.NotEqual(t, 0, member.Birthdate)\n\t\tif coordinator.ID == member.ID {\n\t\t\trequire.True(t, member.Coordinator)\n\t\t} else {\n\t\t\trequire.False(t, member.Coordinator)\n\t\t}\n\t}\n}\n\nfunc TestEmbeddedClient_Ping(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\tctx := context.Background()\n\tresponse, err := e.Ping(ctx, db.rt.This().String(), \"\")\n\trequire.NoError(t, err)\n\trequire.Equal(t, DefaultPingResponse, response)\n}\n\nfunc TestEmbeddedClient_Ping_WithMessage(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\tctx := context.Background()\n\tmessage := \"Olric is the best\"\n\tresponse, err := e.Ping(ctx, db.rt.This().String(), message)\n\trequire.NoError(t, err)\n\trequire.Equal(t, message, response)\n}\n\nfunc TestEmbeddedClient_DMap_Put_PX_With_NX(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb0 := cluster.addMember(t)\n\tdb1 := cluster.addMember(t)\n\n\tctx := context.Background()\n\te := db0.NewEmbeddedClient()\n\tdm0, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\terr = dm0.Put(ctx, \"mykey\", \"myvalue\", PX(time.Minute), NX())\n\trequire.NoError(t, err)\n\n\t<-time.After(time.Millisecond)\n\n\te = db1.NewEmbeddedClient()\n\tdm1, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tgr, err := dm1.Get(ctx, \"mykey\")\n\trequire.NoError(t, err)\n\tassert.NotZero(t, gr.TTL())\n}\n\nfunc TestEmbeddedClient_Issue263(t *testing.T) {\n\tinitNumRoutines := 
runtime.NumGoroutine()\n\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\tctx, cancel := context.WithCancel(context.Background())\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\t// Create N key-value pairs:\n\tconst N = 100\n\tfor i := range N {\n\t\tkey := fmt.Sprintf(\"key-%d\", i)\n\t\tvalue := fmt.Sprintf(\"value-%d\", i)\n\t\terr := dm.Put(ctx, key, value)\n\t\trequire.NoError(t, err)\n\t}\n\n\t// Iterate M times over N keys:\n\tconst M = 100\n\tfor range M {\n\t\titer, err := dm.Scan(ctx)\n\t\trequire.NoError(t, err)\n\t\tfor iter.Next() {\n\t\t\t// Do nothing\n\t\t}\n\t\titer.Close()\n\t}\n\n\trequire.NoError(t, dm.Close(ctx))\n\trequire.NoError(t, e.Close(ctx))\n\trequire.NoError(t, db.Shutdown(ctx))\n\n\tcancel()\n\n\tassert.Equal(t, initNumRoutines, runtime.NumGoroutine())\n\n\truntime.GC()\n\ttime.Sleep(time.Second)\n\n\ts := runtime.MemStats{}\n\truntime.ReadMemStats(&s)\n\n\tconst (\n\t\tKB = 1 << 10\n\t\tMB = KB << 10\n\t)\n\n\tbuf := make([]byte, MB)\n\tstackSize := runtime.Stack(buf, true)\n\n\tt.Logf(\"Non-freed objects: %d\\n\", s.Mallocs-s.Frees)\n\tt.Logf(\"Mem in use (KB): %d\\n\", s.HeapAlloc/KB)\n\tt.Logf(\"Go-routines remained: %d\\n\", runtime.NumGoroutine())\n\tt.Logf(\"Stack traces:\\n%s\\n\", buf[:stackSize])\n}\n"
  },
  {
    "path": "embedded_iterator.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage olric\n\nimport (\n\t\"sync\"\n\n\t\"github.com/olric-data/olric/internal/dmap\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n)\n\n// EmbeddedIterator implements distributed query on DMaps.\ntype EmbeddedIterator struct {\n\tmtx sync.Mutex\n\n\tclient          *EmbeddedClient\n\tdm              *dmap.DMap\n\tclusterIterator *ClusterIterator\n}\n\nfunc (e *EmbeddedIterator) scanOnOwners() error {\n\towners := e.clusterIterator.getOwners()\n\n\tfor idx, owner := range owners {\n\t\tcursor := e.clusterIterator.loadCursor(owner)\n\n\t\tif e.client.db.rt.This().String() == owner {\n\t\t\tkeys, newCursor, err := e.dm.Scan(e.clusterIterator.partID, cursor, e.clusterIterator.config)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\te.clusterIterator.updateIterator(keys, newCursor, owner)\n\t\t\tif newCursor == 0 {\n\t\t\t\te.clusterIterator.removeScannedOwner(idx)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t// Build a scan command here\n\t\ts := protocol.NewScan(e.clusterIterator.partID, e.clusterIterator.dm.Name(), cursor)\n\t\tif e.clusterIterator.config.HasCount {\n\t\t\ts.SetCount(e.clusterIterator.config.Count)\n\t\t}\n\t\tif e.clusterIterator.config.HasMatch {\n\t\t\ts.SetMatch(e.clusterIterator.config.Match)\n\t\t}\n\t\tif e.clusterIterator.config.Replica {\n\t\t\ts.SetReplica()\n\t\t}\n\n\t\tscanCmd := 
s.Command(e.clusterIterator.ctx)\n\t\t// Fetch a Redis client for the given owner.\n\t\trc := e.clusterIterator.clusterClient.client.Get(owner)\n\t\terr := rc.Process(e.clusterIterator.ctx, scanCmd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tkeys, newCursor, err := scanCmd.Result()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\te.clusterIterator.updateIterator(keys, newCursor, owner)\n\t\tif newCursor == 0 {\n\t\t\te.clusterIterator.removeScannedOwner(idx)\n\t\t}\n\t}\n\treturn nil\n}\n\n// Next returns true if there is more key in the iterator implementation.\n// Otherwise, it returns false.\nfunc (e *EmbeddedIterator) Next() bool {\n\treturn e.clusterIterator.Next()\n}\n\n// Key returns a key name from the distributed map.\nfunc (e *EmbeddedIterator) Key() string {\n\treturn e.clusterIterator.Key()\n}\n\n// Close stops the iteration and releases allocated resources.\nfunc (e *EmbeddedIterator) Close() {\n\te.clusterIterator.Close()\n}\n"
  },
  {
    "path": "embedded_iterator_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage olric\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/olric-data/olric/internal/testutil\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestEmbeddedClient_ScanMatch(t *testing.T) {\n\tcl := newTestOlricCluster(t)\n\tdb := cl.addMember(t)\n\tcl.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\n\tevenKeys := make(map[string]bool)\n\tfor i := 0; i < 100; i++ {\n\t\tvar key string\n\t\tif i%2 == 0 {\n\t\t\tkey = fmt.Sprintf(\"even:%s\", testutil.ToKey(i))\n\t\t\tevenKeys[key] = false\n\t\t} else {\n\t\t\tkey = fmt.Sprintf(\"odd:%s\", testutil.ToKey(i))\n\t\t}\n\t\terr = dm.Put(ctx, key, i)\n\t\trequire.NoError(t, err)\n\t}\n\ti, err := dm.Scan(ctx, Match(\"^even:\"))\n\trequire.NoError(t, err)\n\tvar count int\n\tdefer i.Close()\n\n\tfor i.Next() {\n\t\tcount++\n\t\trequire.Contains(t, evenKeys, i.Key())\n\t}\n\trequire.Equal(t, 50, count)\n}\n\nfunc TestEmbeddedClient_Scan(t *testing.T) {\n\tcl := newTestOlricCluster(t)\n\tdb := cl.addMember(t)\n\tcl.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\tdm, err := e.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tallKeys := make(map[string]bool)\n\tfor i := 0; i < 100; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), 
i)\n\t\trequire.NoError(t, err)\n\t\tallKeys[testutil.ToKey(i)] = false\n\t}\n\ti, err := dm.Scan(ctx)\n\trequire.NoError(t, err)\n\tvar count int\n\tdefer i.Close()\n\n\tfor i.Next() {\n\t\tcount++\n\t\trequire.Contains(t, allKeys, i.Key())\n\t}\n\trequire.Equal(t, 100, count)\n}\n"
  },
  {
    "path": "events/cluster_events.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage events\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/olric-data/olric/internal/util\"\n)\n\nconst (\n\tClusterEventsChannel       = \"cluster.events\"\n\tKindNodeJoinEvent          = \"node-join-event\"\n\tKindNodeLeftEvent          = \"node-left-event\"\n\tKindFragmentMigrationEvent = \"fragment-migration-event\"\n\tKindFragmentReceivedEvent  = \"fragment-received-event\"\n)\n\ntype Event interface {\n\tEncode() (string, error)\n}\n\n// encodeEvents encodes given interface to its JSON representation and preserves the order in fields slice.\nfunc encodeEvent(data interface{}, fields []string, valueExtractor func(r reflect.Value, field string) (interface{}, error)) (string, error) {\n\tbuf := bytes.NewBuffer(nil)\n\tbuf.WriteString(\"{\")\n\tr := reflect.Indirect(reflect.ValueOf(data))\n\tfor i, field := range fields {\n\t\tsf, ok := r.Type().FieldByName(field)\n\t\tif !ok {\n\t\t\treturn \"\", fmt.Errorf(\"field not found: %s\", field)\n\t\t}\n\n\t\ttag := strings.Trim(string(sf.Tag), \"json:\")\n\t\ttag, err := strconv.Unquote(tag)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tvalue, err := valueExtractor(r, field)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif i != 0 {\n\t\t\tbuf.WriteString(\",\")\n\t\t}\n\n\t\t// marshal 
key\n\t\tkey, err := json.Marshal(tag)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tbuf.Write(key)\n\n\t\tbuf.WriteString(\":\")\n\t\t// marshal value\n\t\tval, err := json.Marshal(value)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tbuf.Write(val)\n\t}\n\tbuf.WriteString(\"}\")\n\treturn util.BytesToString(buf.Bytes()), nil\n}\n\ntype NodeJoinEvent struct {\n\tKind      string `json:\"kind\"`\n\tSource    string `json:\"source\"`\n\tNodeJoin  string `json:\"node_join\"`\n\tTimestamp int64  `json:\"timestamp\"`\n}\n\nfunc (n *NodeJoinEvent) Encode() (string, error) {\n\tfields := []string{\"Timestamp\", \"Source\", \"Kind\", \"NodeJoin\"}\n\treturn encodeEvent(n, fields, func(r reflect.Value, field string) (interface{}, error) {\n\t\tvar value interface{}\n\t\tswitch field {\n\t\tcase \"Timestamp\":\n\t\t\tvalue = r.FieldByName(field).Int()\n\t\tcase \"Source\", \"Kind\", \"NodeJoin\":\n\t\t\tvalue = r.FieldByName(field).String()\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"invalid field: %s\", field)\n\t\t}\n\t\treturn value, nil\n\t})\n}\n\ntype NodeLeftEvent struct {\n\tKind      string `json:\"kind\"`\n\tSource    string `json:\"source\"`\n\tNodeLeft  string `json:\"node_left\"`\n\tTimestamp int64  `json:\"timestamp\"`\n}\n\nfunc (n *NodeLeftEvent) Encode() (string, error) {\n\tfields := []string{\"Timestamp\", \"Source\", \"Kind\", \"NodeLeft\"}\n\treturn encodeEvent(n, fields, func(r reflect.Value, field string) (interface{}, error) {\n\t\tvar value interface{}\n\t\tswitch field {\n\t\tcase \"Timestamp\":\n\t\t\tvalue = r.FieldByName(field).Int()\n\t\tcase \"Source\", \"Kind\", \"NodeLeft\":\n\t\t\tvalue = r.FieldByName(field).String()\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"invalid field: %s\", field)\n\t\t}\n\t\treturn value, nil\n\t})\n}\n\ntype FragmentMigrationEvent struct {\n\tKind          string `json:\"kind\"`\n\tSource        string `json:\"source\"`\n\tTarget        string `json:\"target\"`\n\tIdentifier    string 
`json:\"identifier\"`\n\tPartitionID   uint64 `json:\"partition_id\"`\n\tDataStructure string `json:\"data_structure\"`\n\tLength        int    `json:\"length\"`\n\tIsBackup      bool   `json:\"is_backup\"`\n\tTimestamp     int64  `json:\"timestamp\"`\n}\n\nfunc (f *FragmentMigrationEvent) Encode() (string, error) {\n\tfields := []string{\n\t\t\"Timestamp\",\n\t\t\"Source\",\n\t\t\"Kind\",\n\t\t\"Target\",\n\t\t\"DataStructure\",\n\t\t\"PartitionID\",\n\t\t\"Identifier\",\n\t\t\"IsBackup\",\n\t\t\"Length\",\n\t}\n\treturn encodeEvent(f, fields, func(r reflect.Value, field string) (interface{}, error) {\n\t\tvar value interface{}\n\t\tswitch field {\n\t\tcase \"IsBackup\":\n\t\t\tvalue = r.FieldByName(field).Bool()\n\t\tcase \"PartitionID\":\n\t\t\tvalue = r.FieldByName(field).Uint()\n\t\tcase \"Timestamp\", \"Length\":\n\t\t\tvalue = r.FieldByName(field).Int()\n\t\tcase \"Source\", \"Kind\", \"Target\", \"DataStructure\", \"Identifier\":\n\t\t\tvalue = r.FieldByName(field).String()\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"invalid field: %s\", field)\n\t\t}\n\t\treturn value, nil\n\t})\n}\n\ntype FragmentReceivedEvent struct {\n\tKind          string `json:\"kind\"`\n\tSource        string `json:\"source\"`\n\tIdentifier    string `json:\"identifier\"`\n\tPartitionID   uint64 `json:\"partition_id\"`\n\tDataStructure string `json:\"data_structure\"`\n\tLength        int    `json:\"length\"`\n\tIsBackup      bool   `json:\"is_backup\"`\n\tTimestamp     int64  `json:\"timestamp\"`\n}\n\nfunc (f *FragmentReceivedEvent) Encode() (string, error) {\n\tfields := []string{\n\t\t\"Timestamp\",\n\t\t\"Source\",\n\t\t\"Kind\",\n\t\t\"DataStructure\",\n\t\t\"PartitionID\",\n\t\t\"Identifier\",\n\t\t\"IsBackup\",\n\t\t\"Length\",\n\t}\n\treturn encodeEvent(f, fields, func(r reflect.Value, field string) (interface{}, error) {\n\t\tvar value interface{}\n\t\tswitch field {\n\t\tcase \"IsBackup\":\n\t\t\tvalue = r.FieldByName(field).Bool()\n\t\tcase 
\"PartitionID\":\n\t\t\tvalue = r.FieldByName(field).Uint()\n\t\tcase \"Timestamp\", \"Length\":\n\t\t\tvalue = r.FieldByName(field).Int()\n\t\tcase \"Source\", \"Kind\", \"DataStructure\", \"Identifier\":\n\t\t\tvalue = r.FieldByName(field).String()\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"invalid field: %s\", field)\n\t\t}\n\t\treturn value, nil\n\t})\n}\n"
  },
  {
    "path": "events/cluster_events_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage events\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestClusterEvents_NodeJoinEvent(t *testing.T) {\n\tvar timestamp int64 = 585199808000 // Author's birthdate!\n\tn := NodeJoinEvent{\n\t\tKind:      KindNodeJoinEvent,\n\t\tSource:    \"127.0.0.1:3423\",\n\t\tNodeJoin:  \"127.0.0.1:3576\",\n\t\tTimestamp: timestamp,\n\t}\n\tresult, err := n.Encode()\n\trequire.NoError(t, err)\n\texpected := `{\"timestamp\":585199808000,\"source\":\"127.0.0.1:3423\",\"kind\":\"node-join-event\",\"node_join\":\"127.0.0.1:3576\"}`\n\trequire.Equal(t, expected, result)\n}\n\nfunc TestClusterEvents_NodeLeftEvent(t *testing.T) {\n\tvar timestamp int64 = 585199808000 // Author's birthdate!\n\tn := NodeLeftEvent{\n\t\tKind:      KindNodeLeftEvent,\n\t\tSource:    \"127.0.0.1:3423\",\n\t\tNodeLeft:  \"127.0.0.1:3576\",\n\t\tTimestamp: timestamp,\n\t}\n\tresult, err := n.Encode()\n\trequire.NoError(t, err)\n\texpected := `{\"timestamp\":585199808000,\"source\":\"127.0.0.1:3423\",\"kind\":\"node-left-event\",\"node_left\":\"127.0.0.1:3576\"}`\n\trequire.Equal(t, expected, result)\n}\n\nfunc TestClusterEvents_FragmentMigrationEvent(t *testing.T) {\n\tvar timestamp int64 = 585199808000 // Author's birthdate!\n\tn := FragmentMigrationEvent{\n\t\tKind:          KindFragmentMigrationEvent,\n\t\tSource:        
\"127.0.0.1:3423\",\n\t\tTarget:        \"127.0.0.1:3576\",\n\t\tIdentifier:    \"mydmap\",\n\t\tPartitionID:   123,\n\t\tDataStructure: \"dmap\",\n\t\tLength:        1234,\n\t\tTimestamp:     timestamp,\n\t}\n\tresult, err := n.Encode()\n\trequire.NoError(t, err)\n\texpected := `{\"timestamp\":585199808000,\"source\":\"127.0.0.1:3423\",\"kind\":\"fragment-migration-event\",\"target\":\"127.0.0.1:3576\",\"data_structure\":\"dmap\",\"partition_id\":123,\"identifier\":\"mydmap\",\"is_backup\":false,\"length\":1234}`\n\trequire.Equal(t, expected, result)\n}\n\nfunc TestClusterEvents_FragmentReceivedEvent(t *testing.T) {\n\tvar timestamp int64 = 585199808000 // Author's birthdate!\n\tn := FragmentReceivedEvent{\n\t\tKind:          KindFragmentReceivedEvent,\n\t\tSource:        \"127.0.0.1:3423\",\n\t\tIdentifier:    \"mydmap\",\n\t\tPartitionID:   123,\n\t\tDataStructure: \"dmap\",\n\t\tLength:        1234,\n\t\tTimestamp:     timestamp,\n\t}\n\tresult, err := n.Encode()\n\trequire.NoError(t, err)\n\texpected := `{\"timestamp\":585199808000,\"source\":\"127.0.0.1:3423\",\"kind\":\"fragment-received-event\",\"data_structure\":\"dmap\",\"partition_id\":123,\"identifier\":\"mydmap\",\"is_backup\":false,\"length\":1234}`\n\trequire.Equal(t, expected, result)\n}\n"
  },
  {
    "path": "get_response.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage olric\n\nimport (\n\t\"errors\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/resp\"\n\t\"github.com/olric-data/olric/pkg/storage\"\n)\n\nvar ErrNilResponse = errors.New(\"storage entry is nil\")\n\ntype GetResponse struct {\n\tentry storage.Entry\n}\n\nfunc (g *GetResponse) Scan(v interface{}) error {\n\tif g.entry == nil {\n\t\treturn ErrNilResponse\n\t}\n\treturn resp.Scan(g.entry.Value(), v)\n}\n\nfunc (g *GetResponse) Int() (int, error) {\n\tv := new(int)\n\terr := g.Scan(v)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn *v, nil\n}\n\nfunc (g *GetResponse) String() (string, error) {\n\tv := new(string)\n\terr := g.Scan(v)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn *v, nil\n}\n\nfunc (g *GetResponse) Int8() (int8, error) {\n\tv := new(int8)\n\terr := g.Scan(v)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn *v, nil\n}\n\nfunc (g *GetResponse) Int16() (int16, error) {\n\tv := new(int16)\n\terr := g.Scan(v)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn *v, nil\n}\n\nfunc (g *GetResponse) Int32() (int32, error) {\n\tv := new(int32)\n\terr := g.Scan(v)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn *v, nil\n}\n\nfunc (g *GetResponse) Int64() (int64, error) {\n\tv := new(int64)\n\terr := g.Scan(v)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn *v, nil\n}\n\nfunc (g *GetResponse) 
Uint() (uint, error) {\n\tv := new(uint)\n\terr := g.Scan(v)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn *v, nil\n}\n\nfunc (g *GetResponse) Uint8() (uint8, error) {\n\tv := new(uint8)\n\terr := g.Scan(v)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn *v, nil\n}\n\nfunc (g *GetResponse) Uint16() (uint16, error) {\n\tv := new(uint16)\n\terr := g.Scan(v)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn *v, nil\n}\n\nfunc (g *GetResponse) Uint32() (uint32, error) {\n\tv := new(uint32)\n\terr := g.Scan(v)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn *v, nil\n}\n\nfunc (g *GetResponse) Uint64() (uint64, error) {\n\tv := new(uint64)\n\terr := g.Scan(v)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn *v, nil\n}\n\nfunc (g *GetResponse) Float32() (float32, error) {\n\tv := new(float32)\n\terr := g.Scan(v)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn *v, nil\n}\n\nfunc (g *GetResponse) Float64() (float64, error) {\n\tv := new(float64)\n\terr := g.Scan(v)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn *v, nil\n}\n\nfunc (g *GetResponse) Bool() (bool, error) {\n\tv := new(bool)\n\terr := g.Scan(v)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn *v, nil\n}\n\nfunc (g *GetResponse) Time() (time.Time, error) {\n\tv := new(time.Time)\n\terr := g.Scan(v)\n\tif err != nil {\n\t\treturn time.Time{}, err\n\t}\n\treturn *v, nil\n}\n\nfunc (g *GetResponse) Duration() (time.Duration, error) {\n\tv := new(time.Duration)\n\terr := g.Scan(v)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn *v, nil\n}\n\nfunc (g *GetResponse) Byte() ([]byte, error) {\n\tv := new([]byte)\n\terr := g.Scan(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn *v, nil\n}\n\nfunc (g *GetResponse) TTL() int64 {\n\treturn g.entry.TTL()\n}\n\nfunc (g *GetResponse) Timestamp() int64 {\n\treturn g.entry.Timestamp()\n}\n"
  },
  {
    "path": "get_response_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage olric\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/dmap\"\n\t\"github.com/olric-data/olric/internal/resp\"\n\t\"github.com/olric-data/olric/internal/testcluster\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestDMap_Get_GetResponse(t *testing.T) {\n\tcluster := testcluster.New(dmap.NewService)\n\ts := cluster.AddMember(nil).(*dmap.Service)\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\tdm, err := s.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tt.Run(\"Scan\", func(t *testing.T) {\n\t\tvar value = 100\n\t\terr = dm.Put(ctx, \"mykey-scan\", value, nil)\n\t\trequire.NoError(t, err)\n\n\t\te, err := dm.Get(ctx, \"mykey-scan\")\n\t\trequire.NoError(t, err)\n\n\t\tgr := &GetResponse{entry: e}\n\t\tscannedValue := new(int)\n\t\terr = gr.Scan(scannedValue)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, value, *scannedValue)\n\t})\n\n\tt.Run(\"Byte\", func(t *testing.T) {\n\t\tvar value = []byte(\"olric\")\n\t\terr = dm.Put(ctx, \"mykey-byte\", value, nil)\n\t\trequire.NoError(t, err)\n\n\t\te, err := dm.Get(ctx, \"mykey-byte\")\n\t\trequire.NoError(t, err)\n\n\t\tgr := &GetResponse{entry: e}\n\t\tscannedValue, err := gr.Byte()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, value, 
scannedValue)\n\t})\n\n\tt.Run(\"TTL\", func(t *testing.T) {\n\t\tvar value = []byte(\"olric\")\n\t\terr = dm.Put(ctx, \"mykey-byte\", value, &dmap.PutConfig{\n\t\t\tHasEX: true,\n\t\t\tEX:    time.Second,\n\t\t})\n\t\trequire.NoError(t, err)\n\n\t\te, err := dm.Get(ctx, \"mykey-byte\")\n\t\trequire.NoError(t, err)\n\n\t\tgr := &GetResponse{entry: e}\n\t\tttl := gr.TTL()\n\t\trequire.Greater(t, ttl, int64(0))\n\t})\n\n\tt.Run(\"Timestamp\", func(t *testing.T) {\n\t\tvar value = []byte(\"olric\")\n\t\terr = dm.Put(ctx, \"mykey-byte\", value, nil)\n\t\trequire.NoError(t, err)\n\n\t\te, err := dm.Get(ctx, \"mykey-byte\")\n\t\trequire.NoError(t, err)\n\n\t\tgr := &GetResponse{entry: e}\n\t\ttimestamp := gr.Timestamp()\n\t\trequire.Greater(t, timestamp, int64(0))\n\t})\n\n\tt.Run(\"Int\", func(t *testing.T) {\n\t\tvar value = 100\n\t\terr = dm.Put(ctx, \"mykey-Int\", value, nil)\n\t\trequire.NoError(t, err)\n\n\t\te, err := dm.Get(ctx, \"mykey-Int\")\n\t\trequire.NoError(t, err)\n\n\t\tgr := &GetResponse{entry: e}\n\t\tscannedValue, err := gr.Int()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, value, scannedValue)\n\t})\n\n\tt.Run(\"String\", func(t *testing.T) {\n\t\tvar value = \"olric\"\n\t\terr = dm.Put(ctx, \"mykey-String\", value, nil)\n\t\trequire.NoError(t, err)\n\n\t\te, err := dm.Get(ctx, \"mykey-String\")\n\t\trequire.NoError(t, err)\n\n\t\tgr := &GetResponse{entry: e}\n\t\tscannedValue, err := gr.String()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, value, scannedValue)\n\t})\n\n\tt.Run(\"Int8\", func(t *testing.T) {\n\t\tvar value int8 = 10\n\t\terr = dm.Put(ctx, \"mykey-Int8\", value, nil)\n\t\trequire.NoError(t, err)\n\n\t\te, err := dm.Get(ctx, \"mykey-Int8\")\n\t\trequire.NoError(t, err)\n\n\t\tgr := &GetResponse{entry: e}\n\t\tscannedValue, err := gr.Int8()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, value, scannedValue)\n\t})\n\n\tt.Run(\"Int16\", func(t *testing.T) {\n\t\tvar value int16 = 10\n\t\terr = dm.Put(ctx, \"mykey-Int16\", 
value, nil)\n\t\trequire.NoError(t, err)\n\n\t\te, err := dm.Get(ctx, \"mykey-Int16\")\n\t\trequire.NoError(t, err)\n\n\t\tgr := &GetResponse{entry: e}\n\t\tscannedValue, err := gr.Int16()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, value, scannedValue)\n\t})\n\n\tt.Run(\"Int32\", func(t *testing.T) {\n\t\tvar value int32 = 10\n\t\terr = dm.Put(ctx, \"mykey-Int32\", value, nil)\n\t\trequire.NoError(t, err)\n\n\t\te, err := dm.Get(ctx, \"mykey-Int32\")\n\t\trequire.NoError(t, err)\n\n\t\tgr := &GetResponse{entry: e}\n\t\tscannedValue, err := gr.Int32()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, value, scannedValue)\n\t})\n\n\tt.Run(\"Int64\", func(t *testing.T) {\n\t\tvar value int64 = 10\n\t\terr = dm.Put(ctx, \"mykey-Int64\", value, nil)\n\t\trequire.NoError(t, err)\n\n\t\te, err := dm.Get(ctx, \"mykey-Int64\")\n\t\trequire.NoError(t, err)\n\n\t\tgr := &GetResponse{entry: e}\n\t\tscannedValue, err := gr.Int64()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, value, scannedValue)\n\t})\n\n\tt.Run(\"Int64\", func(t *testing.T) {\n\t\tvar value int64 = 10\n\t\terr = dm.Put(ctx, \"mykey-Int64\", value, nil)\n\t\trequire.NoError(t, err)\n\n\t\te, err := dm.Get(ctx, \"mykey-Int64\")\n\t\trequire.NoError(t, err)\n\n\t\tgr := &GetResponse{entry: e}\n\t\tscannedValue, err := gr.Int64()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, value, scannedValue)\n\t})\n\n\tt.Run(\"Uint\", func(t *testing.T) {\n\t\tvar value uint = 10\n\t\terr = dm.Put(ctx, \"mykey-Uint\", value, nil)\n\t\trequire.NoError(t, err)\n\n\t\te, err := dm.Get(ctx, \"mykey-Uint\")\n\t\trequire.NoError(t, err)\n\n\t\tgr := &GetResponse{entry: e}\n\t\tscannedValue, err := gr.Uint()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, value, scannedValue)\n\t})\n\n\tt.Run(\"Uint8\", func(t *testing.T) {\n\t\tvar value uint8 = 10\n\t\terr = dm.Put(ctx, \"mykey-Uint8\", value, nil)\n\t\trequire.NoError(t, err)\n\n\t\te, err := dm.Get(ctx, \"mykey-Uint8\")\n\t\trequire.NoError(t, 
err)\n\n\t\tgr := &GetResponse{entry: e}\n\t\tscannedValue, err := gr.Uint8()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, value, scannedValue)\n\t})\n\n\tt.Run(\"Uint16\", func(t *testing.T) {\n\t\tvar value uint16 = 10\n\t\terr = dm.Put(ctx, \"mykey-Uint16\", value, nil)\n\t\trequire.NoError(t, err)\n\n\t\te, err := dm.Get(ctx, \"mykey-Uint16\")\n\t\trequire.NoError(t, err)\n\n\t\tgr := &GetResponse{entry: e}\n\t\tscannedValue, err := gr.Uint16()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, value, scannedValue)\n\t})\n\n\tt.Run(\"Uint32\", func(t *testing.T) {\n\t\tvar value uint32 = 10\n\t\terr = dm.Put(ctx, \"mykey-Uint32\", value, nil)\n\t\trequire.NoError(t, err)\n\n\t\te, err := dm.Get(ctx, \"mykey-Uint32\")\n\t\trequire.NoError(t, err)\n\n\t\tgr := &GetResponse{entry: e}\n\t\tscannedValue, err := gr.Uint32()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, value, scannedValue)\n\t})\n\n\tt.Run(\"Uint64\", func(t *testing.T) {\n\t\tvar value uint64 = 10\n\t\terr = dm.Put(ctx, \"mykey-Uint64\", value, nil)\n\t\trequire.NoError(t, err)\n\n\t\te, err := dm.Get(ctx, \"mykey-Uint64\")\n\t\trequire.NoError(t, err)\n\n\t\tgr := &GetResponse{entry: e}\n\t\tscannedValue, err := gr.Uint64()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, value, scannedValue)\n\t})\n\n\tt.Run(\"Float32\", func(t *testing.T) {\n\t\tvar value float32 = 10.12\n\t\terr = dm.Put(ctx, \"mykey-Float32\", value, nil)\n\t\trequire.NoError(t, err)\n\n\t\te, err := dm.Get(ctx, \"mykey-Float32\")\n\t\trequire.NoError(t, err)\n\n\t\tgr := &GetResponse{entry: e}\n\t\tscannedValue, err := gr.Float32()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, value, scannedValue)\n\t})\n\n\tt.Run(\"Float64\", func(t *testing.T) {\n\t\tvar value = 10.12\n\t\terr = dm.Put(ctx, \"mykey-Float64\", value, nil)\n\t\trequire.NoError(t, err)\n\n\t\te, err := dm.Get(ctx, \"mykey-Float64\")\n\t\trequire.NoError(t, err)\n\n\t\tgr := &GetResponse{entry: e}\n\t\tscannedValue, err := 
gr.Float64()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, value, scannedValue)\n\t})\n\n\tt.Run(\"Bool\", func(t *testing.T) {\n\t\terr = dm.Put(ctx, \"mykey-Bool\", true, nil)\n\t\trequire.NoError(t, err)\n\n\t\te, err := dm.Get(ctx, \"mykey-Bool\")\n\t\trequire.NoError(t, err)\n\n\t\tgr := &GetResponse{entry: e}\n\t\tscannedValue, err := gr.Bool()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, true, scannedValue)\n\t})\n\n\tt.Run(\"time.Time\", func(t *testing.T) {\n\t\tvar value = time.Now()\n\t\terr = dm.Put(ctx, \"mykey-time.Time\", value, nil)\n\t\trequire.NoError(t, err)\n\n\t\te, err := dm.Get(ctx, \"mykey-time.Time\")\n\t\trequire.NoError(t, err)\n\n\t\tgr := &GetResponse{entry: e}\n\n\t\tbuf := bytes.NewBuffer(nil)\n\t\tenc := resp.New(buf)\n\t\terr = enc.Encode(value)\n\t\trequire.NoError(t, err)\n\n\t\texpectedValue := new(time.Time)\n\t\terr = resp.Scan(buf.Bytes(), expectedValue)\n\t\trequire.NoError(t, err)\n\n\t\tscannedValue, err := gr.Time()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, *expectedValue, scannedValue)\n\t})\n\n\tt.Run(\"time.Duration\", func(t *testing.T) {\n\t\tvar value = time.Second\n\t\terr = dm.Put(ctx, \"mykey-time.Duration\", value, nil)\n\t\trequire.NoError(t, err)\n\n\t\te, err := dm.Get(ctx, \"mykey-time.Duration\")\n\t\trequire.NoError(t, err)\n\n\t\tgr := &GetResponse{entry: e}\n\n\t\tbuf := bytes.NewBuffer(nil)\n\t\tenc := resp.New(buf)\n\t\terr = enc.Encode(value)\n\t\trequire.NoError(t, err)\n\n\t\texpectedValue := new(time.Duration)\n\t\terr = resp.Scan(buf.Bytes(), expectedValue)\n\t\trequire.NoError(t, err)\n\n\t\tscannedValue, err := gr.Duration()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, *expectedValue, scannedValue)\n\t})\n\n\tt.Run(\"BinaryUnmarshaler\", func(t *testing.T) {\n\t\tvar value = &myType{\n\t\t\tDatabase: \"olric\",\n\t\t}\n\t\terr = dm.Put(ctx, \"mykey-BinaryUnmarshaler\", value, nil)\n\t\trequire.NoError(t, err)\n\n\t\te, err := dm.Get(ctx, 
\"mykey-BinaryUnmarshaler\")\n\t\trequire.NoError(t, err)\n\n\t\tgr := &GetResponse{entry: e}\n\n\t\tv := myType{}\n\t\terr = gr.Scan(&v)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, value, &v)\n\t})\n}\n\ntype myType struct {\n\tDatabase string\n}\n\nfunc (mt *myType) MarshalBinary() ([]byte, error) {\n\treturn json.Marshal(mt)\n}\n\nfunc (mt *myType) UnmarshalBinary(data []byte) error {\n\treturn json.Unmarshal(data, &mt)\n}\n"
  },
  {
    "path": "go.mod",
    "content": "module github.com/olric-data/olric\n\ngo 1.23.0\n\nrequire (\n\tgithub.com/RoaringBitmap/roaring v1.9.4\n\tgithub.com/buraksezer/consistent v0.10.0\n\tgithub.com/cespare/xxhash/v2 v2.3.0\n\tgithub.com/hashicorp/go-multierror v1.1.1\n\tgithub.com/hashicorp/go-sockaddr v1.0.7\n\tgithub.com/hashicorp/logutils v1.0.0\n\tgithub.com/hashicorp/memberlist v0.5.3\n\tgithub.com/pkg/errors v0.9.1\n\tgithub.com/redis/go-redis/v9 v9.8.0\n\tgithub.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529\n\tgithub.com/stretchr/testify v1.10.0\n\tgithub.com/tidwall/btree v1.7.0\n\tgithub.com/tidwall/match v1.1.1\n\tgithub.com/tidwall/redcon v1.6.2\n\tgithub.com/vmihailenco/msgpack/v5 v5.4.1\n\tgolang.org/x/sync v0.14.0\n\tgopkg.in/yaml.v2 v2.4.0\n)\n\nrequire (\n\tgithub.com/armon/go-metrics v0.4.1 // indirect\n\tgithub.com/bits-and-blooms/bitset v1.22.0 // indirect\n\tgithub.com/davecgh/go-spew v1.1.1 // indirect\n\tgithub.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect\n\tgithub.com/google/btree v1.1.3 // indirect\n\tgithub.com/hashicorp/errwrap v1.1.0 // indirect\n\tgithub.com/hashicorp/go-immutable-radix v1.3.1 // indirect\n\tgithub.com/hashicorp/go-metrics v0.5.4 // indirect\n\tgithub.com/hashicorp/go-msgpack/v2 v2.1.3 // indirect\n\tgithub.com/hashicorp/golang-lru v1.0.2 // indirect\n\tgithub.com/miekg/dns v1.1.65 // indirect\n\tgithub.com/mschoch/smat v0.2.0 // indirect\n\tgithub.com/pmezard/go-difflib v1.0.0 // indirect\n\tgithub.com/vmihailenco/tagparser/v2 v2.0.0 // indirect\n\tgolang.org/x/mod v0.24.0 // indirect\n\tgolang.org/x/net v0.38.0 // indirect\n\tgolang.org/x/sys v0.32.0 // indirect\n\tgolang.org/x/tools v0.31.0 // indirect\n\tgopkg.in/yaml.v3 v3.0.1 // indirect\n)\n"
  },
  {
    "path": "go.sum",
    "content": "cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ngithub.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=\ngithub.com/RoaringBitmap/roaring v1.9.4 h1:yhEIoH4YezLYT04s1nHehNO64EKFTop/wBhxv2QzDdQ=\ngithub.com/RoaringBitmap/roaring v1.9.4/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90=\ngithub.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=\ngithub.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=\ngithub.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=\ngithub.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=\ngithub.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=\ngithub.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=\ngithub.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=\ngithub.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=\ngithub.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=\ngithub.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=\ngithub.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=\ngithub.com/bits-and-blooms/bitset v1.22.0 h1:Tquv9S8+SGaS3EhyA+up3FXzmkhxPGjQQCkcs2uw7w4=\ngithub.com/bits-and-blooms/bitset v1.22.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=\ngithub.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=\ngithub.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=\ngithub.com/bsm/gomega v1.27.10 
h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=\ngithub.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=\ngithub.com/buraksezer/consistent v0.10.0 h1:hqBgz1PvNLC5rkWcEBVAL9dFMBWz6I0VgUCW25rrZlU=\ngithub.com/buraksezer/consistent v0.10.0/go.mod h1:6BrVajWq7wbKZlTOUPs/XVfR8c0maujuPowduSpZqmw=\ngithub.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=\ngithub.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=\ngithub.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=\ngithub.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=\ngithub.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=\ngithub.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=\ngithub.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=\ngithub.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=\ngithub.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=\ngithub.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=\ngithub.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=\ngithub.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=\ngithub.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=\ngithub.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=\ngithub.com/gogo/protobuf v1.1.1/go.mod 
h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=\ngithub.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=\ngithub.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=\ngithub.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=\ngithub.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=\ngithub.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=\ngithub.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=\ngithub.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=\ngithub.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=\ngithub.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=\ngithub.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=\ngithub.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=\ngithub.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=\ngithub.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=\ngithub.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=\ngithub.com/hashicorp/errwrap v1.0.0/go.mod 
h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=\ngithub.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=\ngithub.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=\ngithub.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=\ngithub.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=\ngithub.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=\ngithub.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=\ngithub.com/hashicorp/go-metrics v0.5.4 h1:8mmPiIJkTPPEbAiV97IxdAGNdRdaWwVap1BU6elejKY=\ngithub.com/hashicorp/go-metrics v0.5.4/go.mod h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv4lxZ350i+QQI=\ngithub.com/hashicorp/go-msgpack/v2 v2.1.3 h1:cB1w4Zrk0O3jQBTcFMKqYQWRFfsSQ/TYKNyUUVyCP2c=\ngithub.com/hashicorp/go-msgpack/v2 v2.1.3/go.mod h1:SjlwKKFnwBXvxD/I1bEcfJIBbEJ+MCUn39TxymNR5ZU=\ngithub.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=\ngithub.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=\ngithub.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=\ngithub.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw=\ngithub.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw=\ngithub.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=\ngithub.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=\ngithub.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=\ngithub.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=\ngithub.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=\ngithub.com/hashicorp/logutils v1.0.0 
h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=\ngithub.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=\ngithub.com/hashicorp/memberlist v0.5.3 h1:tQ1jOCypD0WvMemw/ZhhtH+PWpzcftQvgCorLu0hndk=\ngithub.com/hashicorp/memberlist v0.5.3/go.mod h1:h60o12SZn/ua/j0B6iKAZezA4eDaGsIuPO70eOaJ6WE=\ngithub.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=\ngithub.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=\ngithub.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=\ngithub.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=\ngithub.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=\ngithub.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=\ngithub.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=\ngithub.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=\ngithub.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=\ngithub.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=\ngithub.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=\ngithub.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=\ngithub.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=\ngithub.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=\ngithub.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=\ngithub.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=\ngithub.com/miekg/dns v1.1.65 h1:0+tIPHzUW0GCge7IiK3guGP57VAw7hoPDfApjkMD1Fc=\ngithub.com/miekg/dns v1.1.65/go.mod 
h1:Dzw9769uoKVaLuODMDZz9M6ynFU6Em65csPuoi8G0ck=\ngithub.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=\ngithub.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=\ngithub.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=\ngithub.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=\ngithub.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM=\ngithub.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw=\ngithub.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=\ngithub.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=\ngithub.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=\ngithub.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=\ngithub.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=\ngithub.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=\ngithub.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=\ngithub.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=\ngithub.com/prometheus/client_golang v1.7.1/go.mod 
h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=\ngithub.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=\ngithub.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=\ngithub.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngithub.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngithub.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=\ngithub.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=\ngithub.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=\ngithub.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=\ngithub.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=\ngithub.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=\ngithub.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=\ngithub.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=\ngithub.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=\ngithub.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI=\ngithub.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=\ngithub.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=\ngithub.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=\ngithub.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=\ngithub.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=\ngithub.com/sirupsen/logrus v1.6.0/go.mod 
h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=\ngithub.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=\ngithub.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=\ngithub.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=\ngithub.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=\ngithub.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=\ngithub.com/tidwall/btree v1.1.0/go.mod h1:TzIRzen6yHbibdSfK6t8QimqbUnoxUSrZfeW7Uob0q4=\ngithub.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI=\ngithub.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY=\ngithub.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=\ngithub.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=\ngithub.com/tidwall/redcon v1.6.2 h1:5qfvrrybgtO85jnhSravmkZyC0D+7WstbfCs3MmPhow=\ngithub.com/tidwall/redcon v1.6.2/go.mod h1:p5Wbsgeyi2VSTBWOcA5vRXrOb9arFTcU2+ZzFjqV75Y=\ngithub.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=\ngithub.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8=\ngithub.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok=\ngithub.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=\ngithub.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=\ngolang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=\ngolang.org/x/crypto 
v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=\ngolang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=\ngolang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=\ngolang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=\ngolang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=\ngolang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=\ngolang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=\ngolang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=\ngolang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=\ngolang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys 
v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=\ngolang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=\ngolang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=\ngolang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU=\ngolang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ=\ngolang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngoogle.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=\ngoogle.golang.org/protobuf 
v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=\ngoogle.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=\ngoogle.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=\ngoogle.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=\ngoogle.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=\ngoogle.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=\ngopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=\ngopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=\ngopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=\ngopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=\ngopkg.in/yaml.v3 
v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\n"
  },
  {
    "path": "hasher/hasher.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage hasher\n\nimport \"github.com/cespare/xxhash/v2\"\n\n// NewDefaultHasher returns an instance of xxhash package which implements the 64-bit variant of\n// xxHash (XXH64) as described at http://cyan4973.github.io/xxHash/.\nfunc NewDefaultHasher() Hasher {\n\treturn xxhasher{}\n}\n\ntype xxhasher struct{}\n\nfunc (x xxhasher) Sum64(key []byte) uint64 {\n\treturn xxhash.Sum64(key)\n}\n\n// Hasher is responsible for generating unsigned, 64 bit hash of provided byte slice.\n// Hasher should minimize collisions (generating same hash for different byte slice)\n// and while performance is also important fast functions are preferable (i.e.\n// you can use FarmHash family).\ntype Hasher interface {\n\tSum64([]byte) uint64\n}\n"
  },
  {
    "path": "integration_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage olric\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/config\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestIntegration_NodesJoinOrLeftDuringQuery(t *testing.T) {\n\t// TODO: https://github.com/olric-data/olric/issues/227\n\tt.Skip(\"TestIntegration_NodesJoinOrLeftDuringQuery: flaky test\")\n\n\tnewConfig := func() *config.Config {\n\t\tc := config.New(\"local\")\n\t\tc.PartitionCount = config.DefaultPartitionCount\n\t\tc.ReplicaCount = 2\n\t\tc.WriteQuorum = 1\n\t\tc.ReadRepair = true\n\t\tc.ReadQuorum = 1\n\t\tc.LogOutput = io.Discard\n\t\trequire.NoError(t, c.Sanitize())\n\t\trequire.NoError(t, c.Validate())\n\t\treturn c\n\t}\n\n\tcluster := newTestOlricCluster(t)\n\n\tdb := cluster.addMemberWithConfig(t, newConfig())\n\tdb2 := cluster.addMemberWithConfig(t, newConfig())\n\n\tt.Log(\"Wait for 1 second before inserting keys\")\n\t<-time.After(time.Second)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < 100000; i++ {\n\t\terr = dm.Put(ctx, fmt.Sprintf(\"mykey-%d\", i), \"myvalue\")\n\t\trequire.NoError(t, err)\n\t\tif i == 5999 {\n\t\t\tgo 
cluster.addMemberWithConfig(t, newConfig())\n\t\t}\n\t}\n\n\tgo cluster.addMemberWithConfig(t, newConfig())\n\n\tt.Log(\"Fetch all keys\")\n\n\tfor i := 0; i < 100000; i++ {\n\t\t_, err = dm.Get(context.Background(), fmt.Sprintf(\"mykey-%d\", i))\n\t\tif errors.Is(err, ErrConnRefused) {\n\t\t\t// Rewind\n\t\t\ti--\n\t\t\trequire.NoError(t, c.RefreshMetadata(context.Background()))\n\t\t\tcontinue\n\t\t}\n\t\trequire.NoError(t, err)\n\t\tif i == 5999 {\n\t\t\terr = c.client.Close(db2.name)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tt.Logf(\"Shutdown one of the nodes: %s\", db2.name)\n\t\t\trequire.NoError(t, db2.Shutdown(ctx))\n\n\t\t\tgo cluster.addMemberWithConfig(t, newConfig())\n\n\t\t\tt.Log(\"Wait for \\\"NodeLeave\\\" event propagation\")\n\t\t\t<-time.After(time.Second)\n\t\t}\n\t}\n\n\tfor i := 0; i < 100000; i++ {\n\t\t_, err = dm.Get(context.Background(), fmt.Sprintf(\"mykey-%d\", i))\n\t\trequire.NoError(t, err)\n\t}\n}\n\nfunc TestIntegration_DMap_Cache_Eviction_LRU_MaxKeys(t *testing.T) {\n\tmaxKeys := 100000\n\tnewConfig := func() *config.Config {\n\t\tc := config.New(\"local\")\n\t\tc.PartitionCount = config.DefaultPartitionCount\n\t\tc.ReplicaCount = 1\n\t\tc.WriteQuorum = 1\n\t\tc.ReadQuorum = 1\n\t\tc.LogOutput = io.Discard\n\t\tc.DMaps.MaxKeys = maxKeys\n\t\tc.DMaps.EvictionPolicy = config.LRUEviction\n\t\trequire.NoError(t, c.Sanitize())\n\t\trequire.NoError(t, c.Validate())\n\t\treturn c\n\t}\n\n\tcluster := newTestOlricCluster(t)\n\n\tdb := cluster.addMemberWithConfig(t, newConfig())\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < maxKeys; i++ {\n\t\terr = dm.Put(ctx, fmt.Sprintf(\"mykey-%d\", i), \"myvalue\")\n\t\trequire.NoError(t, err)\n\t}\n\n\tvar total int\n\tfor i := 0; i < maxKeys; i++ {\n\t\terr = dm.Put(ctx, 
fmt.Sprintf(\"mykey-%d\", i), \"myvalue\", NX())\n\t\tif err == ErrKeyFound {\n\t\t\terr = nil\n\t\t} else {\n\t\t\ttotal++\n\t\t}\n\t\trequire.NoError(t, err)\n\t}\n\trequire.Greater(t, total, 0)\n\tt.Logf(\"number of misses: %d, utilization rate: %f\", total, float64(100)-(float64(total*100))/float64(maxKeys))\n}\n\nfunc TestIntegration_DMap_Cache_Eviction_MaxKeys(t *testing.T) {\n\tmaxKeys := 100000\n\tnewConfig := func() *config.Config {\n\t\tc := config.New(\"local\")\n\t\tc.PartitionCount = config.DefaultPartitionCount\n\t\tc.ReplicaCount = 1\n\t\tc.WriteQuorum = 1\n\t\tc.ReadQuorum = 1\n\t\tc.LogOutput = io.Discard\n\t\tc.DMaps.MaxKeys = maxKeys\n\t\trequire.NoError(t, c.Sanitize())\n\t\trequire.NoError(t, c.Validate())\n\t\treturn c\n\t}\n\n\tcluster := newTestOlricCluster(t)\n\n\tdb := cluster.addMemberWithConfig(t, newConfig())\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < maxKeys; i++ {\n\t\terr = dm.Put(ctx, fmt.Sprintf(\"mykey-%d\", i), \"myvalue\")\n\t\trequire.NoError(t, err)\n\t}\n\n\tvar total int\n\tfor i := maxKeys; i < 2*maxKeys; i++ {\n\t\terr = dm.Put(ctx, fmt.Sprintf(\"mykey-%d\", i), \"myvalue\", NX())\n\t\tif err == ErrKeyFound {\n\t\t\terr = nil\n\t\t} else {\n\t\t\ttotal++\n\t\t}\n\t\trequire.NoError(t, err)\n\t}\n\n\tfor i := 0; i < maxKeys; i++ {\n\t\t_, err = dm.Get(ctx, fmt.Sprintf(\"mykey-%d\", i))\n\t\tif err == ErrKeyNotFound {\n\t\t\terr = nil\n\t\t\ttotal++\n\t\t}\n\t\trequire.NoError(t, err)\n\t}\n\trequire.Equal(t, maxKeys, total)\n}\n\nfunc TestIntegration_DMap_Cache_Eviction_MaxIdleDuration(t *testing.T) {\n\tmaxKeys := 100000\n\tnewConfig := func() *config.Config {\n\t\tc := config.New(\"local\")\n\t\tc.PartitionCount = config.DefaultPartitionCount\n\t\tc.ReplicaCount = 1\n\t\tc.WriteQuorum = 1\n\t\tc.ReadQuorum = 
1\n\t\tc.LogOutput = io.Discard\n\t\tc.DMaps.MaxIdleDuration = 100 * time.Millisecond\n\t\trequire.NoError(t, c.Sanitize())\n\t\trequire.NoError(t, c.Validate())\n\t\treturn c\n\t}\n\n\tcluster := newTestOlricCluster(t)\n\n\tdb := cluster.addMemberWithConfig(t, newConfig())\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < maxKeys; i++ {\n\t\terr = dm.Put(ctx, fmt.Sprintf(\"mykey-%d\", i), \"myvalue\")\n\t\trequire.NoError(t, err)\n\t}\n\n\t<-time.After(250 * time.Millisecond)\n\n\tvar total int\n\n\tfor i := 0; i < maxKeys; i++ {\n\t\t_, err = dm.Get(ctx, fmt.Sprintf(\"mykey-%d\", i))\n\t\tif err == ErrKeyNotFound {\n\t\t\terr = nil\n\t\t\ttotal++\n\t\t}\n\t\trequire.NoError(t, err)\n\t}\n\trequire.Greater(t, total, 0)\n}\n\nfunc TestIntegration_DMap_Cache_Eviction_TTLDuration(t *testing.T) {\n\tmaxKeys := 100000\n\tnewConfig := func() *config.Config {\n\t\tc := config.New(\"local\")\n\t\tc.PartitionCount = config.DefaultPartitionCount\n\t\tc.ReplicaCount = 1\n\t\tc.WriteQuorum = 1\n\t\tc.ReadQuorum = 1\n\t\tc.LogOutput = io.Discard\n\t\tc.DMaps.TTLDuration = 100 * time.Millisecond\n\t\trequire.NoError(t, c.Sanitize())\n\t\trequire.NoError(t, c.Validate())\n\t\treturn c\n\t}\n\n\tcluster := newTestOlricCluster(t)\n\n\tdb := cluster.addMemberWithConfig(t, newConfig())\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < maxKeys; i++ {\n\t\terr = dm.Put(ctx, fmt.Sprintf(\"mykey-%d\", i), \"myvalue\")\n\t\trequire.NoError(t, err)\n\t}\n\n\t<-time.After(250 * time.Millisecond)\n\n\tvar total int\n\n\tfor i := 0; i < maxKeys; i++ {\n\t\t_, err := dm.Get(ctx, 
fmt.Sprintf(\"mykey-%d\", i))\n\t\tif err == ErrKeyNotFound {\n\t\t\terr = nil\n\t\t\ttotal++\n\t\t}\n\t\trequire.NoError(t, err)\n\t}\n\trequire.Equal(t, maxKeys, total)\n}\n\nfunc TestIntegration_DMap_Cache_Eviction_LRU_MaxInuse(t *testing.T) {\n\tmaxKeys := 100000\n\tnewConfig := func() *config.Config {\n\t\tc := config.New(\"local\")\n\t\tc.PartitionCount = config.DefaultPartitionCount\n\t\tc.ReplicaCount = 1\n\t\tc.WriteQuorum = 1\n\t\tc.ReadQuorum = 1\n\t\tc.LogOutput = io.Discard\n\t\tc.DMaps.MaxInuse = 100 // bytes\n\t\tc.DMaps.EvictionPolicy = \"LRU\"\n\t\trequire.NoError(t, c.Sanitize())\n\t\trequire.NoError(t, c.Validate())\n\t\treturn c\n\t}\n\n\tcluster := newTestOlricCluster(t)\n\n\tdb := cluster.addMemberWithConfig(t, newConfig())\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < maxKeys; i++ {\n\t\terr = dm.Put(ctx, fmt.Sprintf(\"mykey-%d\", i), \"myvalue\")\n\t\trequire.NoError(t, err)\n\t}\n\n\t<-time.After(250 * time.Millisecond)\n\n\tvar total int\n\n\tfor i := 0; i < maxKeys; i++ {\n\t\t_, err = dm.Get(ctx, fmt.Sprintf(\"mykey-%d\", i))\n\t\tif err == ErrKeyNotFound {\n\t\t\terr = nil\n\t\t\ttotal++\n\t\t}\n\t\trequire.NoError(t, err)\n\t}\n\trequire.Greater(t, total, 0)\n}\n\nfunc TestIntegration_Kill_Nodes_During_Operation(t *testing.T) {\n\tnewConfig := func() *config.Config {\n\t\tc := config.New(\"local\")\n\t\tc.PartitionCount = config.DefaultPartitionCount\n\t\tc.ReplicaCount = 3\n\t\tc.WriteQuorum = 1\n\t\tc.ReadRepair = true\n\t\tc.ReadQuorum = 1\n\t\tc.LogOutput = io.Discard\n\t\trequire.NoError(t, c.Sanitize())\n\t\trequire.NoError(t, c.Validate())\n\t\treturn c\n\t}\n\n\tcluster := newTestOlricCluster(t)\n\n\tdb := cluster.addMemberWithConfig(t, newConfig())\n\n\tcluster.addMemberWithConfig(t, newConfig())\n\tdb3 := 
cluster.addMemberWithConfig(t, newConfig())\n\tcluster.addMemberWithConfig(t, newConfig())\n\tdb5 := cluster.addMemberWithConfig(t, newConfig())\n\n\tt.Log(\"Wait for 1 second before inserting keys\")\n\t<-time.After(time.Second)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\trequire.NoError(t, err)\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tt.Log(\"Insert keys\")\n\n\tfor i := 0; i < 100000; i++ {\n\t\terr = dm.Put(ctx, fmt.Sprintf(\"mykey-%d\", i), \"myvalue\")\n\t\trequire.NoError(t, err)\n\t}\n\n\tt.Log(\"Fetch all keys\")\n\n\tfor i := 0; i < 100000; i++ {\n\t\t_, err = dm.Get(ctx, fmt.Sprintf(\"mykey-%d\", i))\n\t\tif err == ErrKeyNotFound {\n\t\t\terr = nil\n\t\t}\n\t\trequire.NoError(t, err)\n\t}\n\n\tt.Logf(\"Terminate %s\", db3.rt.This())\n\trequire.NoError(t, db3.Shutdown(context.Background()))\n\n\tt.Logf(\"Terminate %s\", db5.rt.This())\n\trequire.NoError(t, db5.Shutdown(context.Background()))\n\n\tt.Log(\"Wait for \\\"NodeLeave\\\" event propagation\")\n\t<-time.After(time.Second)\n\n\tfor i := 0; i < 100000; i++ {\n\t\t_, err = dm.Get(context.Background(), fmt.Sprintf(\"mykey-%d\", i))\n\t\tif errors.Is(err, ErrConnRefused) {\n\t\t\ti--\n\t\t\tfmt.Println(c.RefreshMetadata(context.Background()))\n\t\t\tcontinue\n\t\t}\n\t\trequire.NoError(t, err)\n\t}\n}\n\nfunc scanIntegrationTestCommon(t *testing.T, embedded bool, keyFunc func(i int) string, options ...ScanOption) []map[string]struct{} {\n\tnewConfig := func() *config.Config {\n\t\tc := config.New(\"local\")\n\t\tc.PartitionCount = config.DefaultPartitionCount\n\t\tc.ReplicaCount = 2\n\t\tc.WriteQuorum = 1\n\t\tc.ReadRepair = false\n\t\tc.ReadQuorum = 1\n\t\tc.LogOutput = io.Discard\n\t\tc.TriggerBalancerInterval = time.Millisecond\n\t\trequire.NoError(t, c.Sanitize())\n\t\trequire.NoError(t, c.Validate())\n\t\treturn c\n\t}\n\n\tcluster := 
newTestOlricCluster(t)\n\n\tdb := cluster.addMemberWithConfig(t, newConfig())\n\tdb2 := cluster.addMemberWithConfig(t, newConfig())\n\t_ = cluster.addMemberWithConfig(t, newConfig())\n\n\tt.Log(\"Wait for 1 second before inserting keys\")\n\t<-time.After(time.Second)\n\n\tctx := context.Background()\n\tvar c Client\n\tvar err error\n\n\tif embedded {\n\t\tc = db.NewEmbeddedClient()\n\t} else {\n\t\tc, err = NewClusterClient([]string{db.name})\n\t\trequire.NoError(t, err)\n\t}\n\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tpassOne := make(map[string]struct{})\n\tpassTwo := make(map[string]struct{})\n\tfor i := 0; i < 10000; i++ {\n\t\tkey := keyFunc(i)\n\t\terr = dm.Put(ctx, key, \"myvalue\")\n\t\trequire.NoError(t, err)\n\t\tpassOne[key] = struct{}{}\n\t\tpassTwo[key] = struct{}{}\n\t}\n\n\tt.Logf(\"Shutdown one of the nodes: %s\", db2.name)\n\trequire.NoError(t, db2.Shutdown(ctx))\n\n\tt.Log(\"Wait for \\\"NodeLeave\\\" event propagation\")\n\t<-time.After(time.Second)\n\n\tt.Log(\"First pass\")\n\n\ts, err := dm.Scan(context.Background(), options...)\n\trequire.NoError(t, err)\n\tfor s.Next() {\n\t\tdelete(passOne, s.Key())\n\t}\n\ts.Close()\n\n\tdb3 := cluster.addMemberWithConfig(t, newConfig())\n\tt.Logf(\"Add a new member: %s\", db3.rt.This())\n\n\t<-time.After(time.Second)\n\n\tt.Log(\"Second pass\")\n\ts, err = dm.Scan(context.Background(), options...)\n\trequire.NoError(t, err)\n\n\tfor s.Next() {\n\t\tdelete(passTwo, s.Key())\n\t}\n\n\treturn []map[string]struct{}{passOne, passTwo}\n}\n\nfunc TestIntegration_Network_Partitioning_Cluster_DM_SCAN(t *testing.T) {\n\tkeyGenerator := func(i int) string {\n\t\treturn fmt.Sprintf(\"mykey-%d\", i)\n\t}\n\tresult := scanIntegrationTestCommon(t, false, keyGenerator)\n\tpassOne, passTwo := result[0], result[1]\n\trequire.Empty(t, passOne)\n\trequire.Empty(t, passTwo)\n}\n\nfunc 
TestIntegration_Network_Partitioning_Cluster_DM_SCAN_Match(t *testing.T) {\n\tvar oddNumbers int\n\tkeyGenerator := func(i int) string {\n\t\tif i%2 == 0 {\n\t\t\treturn fmt.Sprintf(\"even:%d\", i)\n\t\t}\n\t\toddNumbers++\n\t\treturn fmt.Sprintf(\"odd:%d\", i)\n\t}\n\tresult := scanIntegrationTestCommon(t, false, keyGenerator, Match(\"^even:\"))\n\tpassOne, passTwo := result[0], result[1]\n\trequire.Len(t, passOne, oddNumbers)\n\trequire.Len(t, passTwo, oddNumbers)\n}\n\nfunc TestIntegration_Network_Partitioning_Embedded_DM_SCAN(t *testing.T) {\n\tkeyGenerator := func(i int) string {\n\t\treturn fmt.Sprintf(\"mykey-%d\", i)\n\t}\n\tresult := scanIntegrationTestCommon(t, true, keyGenerator)\n\tpassOne, passTwo := result[0], result[1]\n\trequire.Empty(t, passOne)\n\trequire.Empty(t, passTwo)\n}\n\nfunc TestIntegration_Network_Partitioning_Embedded_DM_SCAN_Match(t *testing.T) {\n\tvar oddNumbers int\n\tkeyGenerator := func(i int) string {\n\t\tif i%2 == 0 {\n\t\t\treturn fmt.Sprintf(\"even:%d\", i)\n\t\t}\n\t\toddNumbers++\n\t\treturn fmt.Sprintf(\"odd:%d\", i)\n\t}\n\tresult := scanIntegrationTestCommon(t, true, keyGenerator, Match(\"^even:\"))\n\tpassOne, passTwo := result[0], result[1]\n\trequire.Len(t, passOne, oddNumbers)\n\trequire.Len(t, passTwo, oddNumbers)\n}\n"
  },
  {
    "path": "internal/bufpool/bufpool.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage bufpool\n\nimport (\n\t\"bytes\"\n\t\"sync\"\n)\n\n// BufPool maintains a buffer pool.\ntype BufPool struct {\n\tp sync.Pool\n}\n\n// New creates a new BufPool.\nfunc New() *BufPool {\n\treturn &BufPool{\n\t\tp: sync.Pool{\n\t\t\tNew: func() interface{} {\n\t\t\t\treturn new(bytes.Buffer)\n\t\t\t},\n\t\t},\n\t}\n}\n\n// Put resets the buffer and puts it back to the pool.\nfunc (p *BufPool) Put(b *bytes.Buffer) {\n\tb.Reset()\n\tp.p.Put(b)\n}\n\n// Get returns an empty buffer from the pool. It creates a new buffer, if there\n// is no bytes.Buffer available in the pool.\nfunc (p *BufPool) Get() *bytes.Buffer {\n\treturn p.p.Get().(*bytes.Buffer)\n}\n"
  },
  {
    "path": "internal/bufpool/bufpool_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage bufpool\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestBufPool(t *testing.T) {\n\tp := New()\n\tb := make([]byte, 100)\n\tfor i := 0; i < 1000; i++ {\n\t\tbuf := p.Get()\n\t\tnr, err := buf.Write(b)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, 100, nr)\n\t\tp.Put(buf)\n\t}\n}\n"
  },
  {
    "path": "internal/checkpoint/checkpoint.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage checkpoint\n\nimport \"sync/atomic\"\n\nvar (\n\trequired int32\n\tpassed   int32\n)\n\nfunc Add() {\n\tatomic.AddInt32(&required, 1)\n}\n\nfunc Pass() {\n\tatomic.AddInt32(&passed, 1)\n}\n\nfunc AllPassed() bool {\n\treturn atomic.LoadInt32(&passed) == required\n}\n"
  },
  {
    "path": "internal/checkpoint/checkpoint_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage checkpoint\n\nimport (\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestCheckpoint(t *testing.T) {\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < 10; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tAdd()\n\t\t}()\n\t}\n\n\twg.Wait()\n\n\tfor i := 0; i < 10; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tPass()\n\t\t}()\n\t}\n\n\twg.Wait()\n\trequire.Equal(t, true, AllPassed())\n}\n"
  },
  {
    "path": "internal/cluster/balancer/balancer.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage balancer\n\nimport (\n\t\"context\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/config\"\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/cluster/routingtable\"\n\t\"github.com/olric-data/olric/internal/discovery\"\n\t\"github.com/olric-data/olric/internal/environment\"\n\t\"github.com/olric-data/olric/internal/service\"\n\t\"github.com/olric-data/olric/pkg/flog\"\n)\n\ntype Balancer struct {\n\tsync.Mutex\n\n\tlog     *flog.Logger\n\tconfig  *config.Config\n\tprimary *partitions.Partitions\n\tbackup  *partitions.Partitions\n\trt      *routingtable.RoutingTable\n\twg      sync.WaitGroup\n\tctx     context.Context\n\tcancel  context.CancelFunc\n}\n\nfunc New(e *environment.Environment) *Balancer {\n\tc := e.Get(\"config\").(*config.Config)\n\tlog := e.Get(\"logger\").(*flog.Logger)\n\tctx, cancel := context.WithCancel(context.Background())\n\treturn &Balancer{\n\t\tconfig:  c,\n\t\tprimary: e.Get(\"primary\").(*partitions.Partitions),\n\t\tbackup:  e.Get(\"backup\").(*partitions.Partitions),\n\t\trt:      e.Get(\"routingtable\").(*routingtable.RoutingTable),\n\t\tlog:     log,\n\t\tctx:     ctx,\n\t\tcancel:  cancel,\n\t}\n}\n\nfunc (b *Balancer) isAlive() bool {\n\tselect {\n\tcase <-b.ctx.Done():\n\t\t// The node is gone.\n\t\treturn 
false\n\tdefault:\n\t}\n\treturn true\n}\n\nfunc (b *Balancer) scanPartition(sign uint64, part *partitions.Partition, owners ...discovery.Member) {\n\townersStr := func() string {\n\t\tvar names []string\n\t\tfor _, owner := range owners {\n\t\t\tnames = append(names, owner.String())\n\t\t}\n\t\treturn strings.Join(names, \",\")\n\t}()\n\n\tpart.Map().Range(func(rawName, rawFragment interface{}) bool {\n\t\tf := rawFragment.(partitions.Fragment)\n\t\tif f.Stats().Length == 0 {\n\t\t\treturn false\n\t\t}\n\t\tname := strings.TrimPrefix(rawName.(string), \"dmap.\")\n\n\t\tb.log.V(2).Printf(\"[INFO] Moving %s fragment: %s (kind: %s) on PartID: %d to %s\",\n\t\t\tf.Name(), name, part.Kind(), part.ID(), ownersStr)\n\n\t\terr := f.Move(part, name, owners)\n\t\tif err != nil {\n\t\t\tb.log.V(2).Printf(\"[ERROR] Failed to move %s fragment: %s on PartID: %d to %s: %v\",\n\t\t\t\tf.Name(), name, part.ID(), ownersStr, err)\n\t\t}\n\n\t\t// if this returns true, the iteration continues\n\t\treturn !b.breakLoop(sign)\n\t})\n}\n\nfunc (b *Balancer) primaryCopies() {\n\tsign := b.rt.Signature()\n\tfor partID := uint64(0); partID < b.config.PartitionCount; partID++ {\n\t\tif b.breakLoop(sign) {\n\t\t\tbreak\n\t\t}\n\n\t\tpart := b.primary.PartitionByID(partID)\n\t\tif part.Length() == 0 {\n\t\t\t// Empty partition. Skip it.\n\t\t\tcontinue\n\t\t}\n\n\t\towner := part.Owner()\n\t\t// Here we don't use CompareByID function because the routing table is an\n\t\t// eventually consistent data structure and a node can try to move data\n\t\t// to previous instance(the same name but a different birthdate)\n\t\t// of itself. So just check the name.\n\t\tif owner.CompareByName(b.rt.This()) {\n\t\t\t// Already belongs to me.\n\t\t\tcontinue\n\t\t}\n\n\t\t// This is a previous owner. 
Move the keys.\n\t\tb.scanPartition(sign, part, owner)\n\t}\n}\n\nfunc (b *Balancer) breakLoop(sign uint64) bool {\n\tif !b.isAlive() {\n\t\treturn true\n\t}\n\n\tif sign != b.rt.Signature() {\n\t\t// Routing table is updated. Just quit. Another balancer goroutine\n\t\t// will work on the new table immediately.\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nfunc (b *Balancer) backupCopies() {\n\tsign := b.rt.Signature()\nLOOP:\n\tfor partID := uint64(0); partID < b.config.PartitionCount; partID++ {\n\t\tif b.breakLoop(sign) {\n\t\t\tbreak\n\t\t}\n\n\t\tpart := b.backup.PartitionByID(partID)\n\t\tif part.Length() == 0 || part.OwnerCount() == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar (\n\t\t\tcounter       = 1\n\t\t\tcurrentOwners []discovery.Member\n\t\t)\n\n\t\towners := part.Owners()\n\t\tfor i := len(owners) - 1; i >= 0; i-- {\n\t\t\tif counter > b.config.ReplicaCount-1 {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tcounter++\n\t\t\towner := owners[i]\n\t\t\t// Here we don't use CompareById function because the routing table\n\t\t\t// is an eventually consistent data structure and a node can try to\n\t\t\t// move data to previous instance(the same name but a different birthdate)\n\t\t\t// of itself. 
So just check the name.\n\t\t\tif b.rt.This().CompareByName(owner) {\n\t\t\t\t// Already belongs to me.\n\t\t\t\tcontinue LOOP\n\t\t\t}\n\t\t\tcurrentOwners = append(currentOwners, owner)\n\t\t}\n\n\t\tif len(currentOwners) == 0 {\n\t\t\tcontinue LOOP\n\t\t}\n\n\t\tb.scanPartition(sign, part, currentOwners...)\n\t}\n}\n\nfunc (b *Balancer) triggerBalancer() {\n\tb.Lock()\n\tdefer b.Unlock()\n\n\tif err := b.rt.CheckBootstrap(); err != nil {\n\t\tb.log.V(2).Printf(\"[WARN] Balancer awaits for bootstrapping\")\n\t\treturn\n\t}\n\n\tb.primaryCopies()\n\n\tif b.config.ReplicaCount > config.MinimumReplicaCount {\n\t\tb.backupCopies()\n\t}\n}\n\nfunc (b *Balancer) BalanceEagerly() {\n\tb.triggerBalancer()\n}\n\nfunc (b *Balancer) balance() {\n\tdefer b.wg.Done()\n\n\ttimer := time.NewTimer(b.config.TriggerBalancerInterval)\n\tdefer timer.Stop()\n\n\tfor {\n\t\ttimer.Reset(b.config.TriggerBalancerInterval)\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\tb.triggerBalancer()\n\t\tcase <-b.ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (b *Balancer) Start() error {\n\tb.wg.Add(1)\n\tgo b.balance()\n\treturn nil\n}\n\nfunc (b *Balancer) RegisterHandlers() {}\n\nfunc (b *Balancer) Shutdown(ctx context.Context) error {\n\tselect {\n\tcase <-b.ctx.Done():\n\t\t// already closed\n\t\treturn nil\n\tdefault:\n\t}\n\n\tb.cancel()\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tb.wg.Wait()\n\t\tclose(done)\n\t}()\n\tselect {\n\tcase <-ctx.Done():\n\t\terr := ctx.Err()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase <-done:\n\t}\n\n\treturn nil\n}\n\nvar _ service.Service = (*Balancer)(nil)\n"
  },
  {
    "path": "internal/cluster/balancer/balancer_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage balancer\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/config\"\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/cluster/routingtable\"\n\t\"github.com/olric-data/olric/internal/discovery\"\n\t\"github.com/olric-data/olric/internal/environment\"\n\t\"github.com/olric-data/olric/internal/server\"\n\t\"github.com/olric-data/olric/internal/testutil\"\n\t\"github.com/olric-data/olric/internal/testutil/mockfragment\"\n\t\"github.com/stretchr/testify/require\"\n\t\"golang.org/x/sync/errgroup\"\n)\n\nfunc newTestEnvironment(c *config.Config) *environment.Environment {\n\tif c == nil {\n\t\tc = testutil.NewConfig()\n\t}\n\n\te := environment.New()\n\te.Set(\"config\", c)\n\te.Set(\"logger\", testutil.NewFlogger(c))\n\te.Set(\"primary\", partitions.New(c.PartitionCount, partitions.PRIMARY))\n\te.Set(\"backup\", partitions.New(c.PartitionCount, partitions.BACKUP))\n\te.Set(\"client\", server.NewClient(c.Client))\n\treturn e\n}\n\nfunc newBalancerForTest(e *environment.Environment) *Balancer {\n\trt := routingtable.New(e)\n\tsrv := e.Get(\"server\").(*server.Server)\n\tgo func() {\n\t\terr := srv.ListenAndServe()\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"ListenAndServe returned 
an error: %v\", err))\n\t\t}\n\t}()\n\t<-srv.StartedCtx.Done()\n\n\te.Set(\"routingtable\", rt)\n\tb := New(e)\n\treturn b\n}\n\ntype mockCluster struct {\n\tt         *testing.T\n\tpeerPorts []int\n\terrGr     errgroup.Group\n\tctx       context.Context\n\tcancel    context.CancelFunc\n}\n\nfunc newMockCluster(t *testing.T) *mockCluster {\n\tctx, cancel := context.WithCancel(context.Background())\n\treturn &mockCluster{\n\t\tt:      t,\n\t\tctx:    ctx,\n\t\tcancel: cancel,\n\t}\n}\n\nfunc (mc *mockCluster) addNode(e *environment.Environment) *Balancer {\n\tif e == nil {\n\t\te = newTestEnvironment(nil)\n\t}\n\tc := e.Get(\"config\").(*config.Config)\n\tc.TriggerBalancerInterval = time.Millisecond\n\tc.DMaps.CheckEmptyFragmentsInterval = time.Millisecond\n\n\tport, err := testutil.GetFreePort()\n\tif err != nil {\n\t\trequire.NoError(mc.t, err)\n\t}\n\tc.MemberlistConfig.BindPort = port\n\n\tvar peers []string\n\tfor _, peerPort := range mc.peerPorts {\n\t\tpeers = append(peers, net.JoinHostPort(\"127.0.0.1\", strconv.Itoa(peerPort)))\n\t}\n\tc.Peers = peers\n\n\tsrv := testutil.NewServer(c)\n\te.Set(\"server\", srv)\n\tb := newBalancerForTest(e)\n\n\terr = b.Start()\n\tif err != nil {\n\t\trequire.NoError(mc.t, err)\n\t}\n\n\terr = b.rt.Join()\n\trequire.NoError(mc.t, err)\n\n\terr = b.rt.Start()\n\tif err != nil {\n\t\trequire.NoError(mc.t, err)\n\t}\n\n\tmc.errGr.Go(func() error {\n\t\t<-mc.ctx.Done()\n\t\treturn srv.Shutdown(context.Background())\n\t})\n\n\tmc.errGr.Go(func() error {\n\t\t<-mc.ctx.Done()\n\t\treturn b.rt.Shutdown(context.Background())\n\t})\n\n\tmc.peerPorts = append(mc.peerPorts, port)\n\n\tmc.t.Cleanup(func() {\n\t\trequire.NoError(mc.t, b.Shutdown(context.Background()))\n\t})\n\n\treturn b\n}\n\nfunc (mc *mockCluster) shutdown() {\n\tmc.cancel()\n\trequire.NoError(mc.t, mc.errGr.Wait())\n}\n\nfunc TestBalance_Primary_Move(t *testing.T) {\n\tcluster := newMockCluster(t)\n\tdefer cluster.shutdown()\n\n\te1 := 
newTestEnvironment(nil)\n\tcluster.addNode(e1)\n\n\tfragments := make(map[uint64]*mockfragment.MockFragment)\n\n\t// Create a MockFragment and insert some fake data\n\tc := e1.Get(\"config\").(*config.Config)\n\tpart := e1.Get(strings.ToLower(partitions.PRIMARY.String())).(*partitions.Partitions)\n\tfor partID := uint64(0); partID < c.PartitionCount; partID++ {\n\t\tpart := part.PartitionByID(partID)\n\t\ts := mockfragment.New()\n\t\ts.Fill()\n\t\tpart.Map().Store(\"dmap.test-data\", s)\n\t\tfragments[partID] = s\n\t}\n\n\te2 := newTestEnvironment(nil)\n\tb2 := cluster.addNode(e2)\n\n\terr := testutil.TryWithInterval(10, 100*time.Millisecond, func() error {\n\t\tif !b2.rt.IsBootstrapped() {\n\t\t\treturn errors.New(\"the second node cannot be bootstrapped\")\n\t\t}\n\t\treturn nil\n\t})\n\trequire.NoError(t, err)\n\n\tfor partID, f := range fragments {\n\t\tresult := f.Result()\n\t\tif len(result) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\trequire.Len(t, result, 1)\n\t\trequire.NotNil(t, result[partitions.PRIMARY])\n\t\tr := result[partitions.PRIMARY]\n\t\trequire.NotNil(t, r[partID])\n\t\trequire.Equal(t, \"test-data\", r[partID].Name)\n\t\trequire.Equal(t, []discovery.Member{b2.rt.This()}, r[partID].Owners)\n\t}\n}\n\nfunc checkBackupOwnership(e *environment.Environment) error {\n\tc := e.Get(\"config\").(*config.Config)\n\tprimary := e.Get(strings.ToLower(partitions.PRIMARY.String())).(*partitions.Partitions)\n\tbackup := e.Get(strings.ToLower(partitions.BACKUP.String())).(*partitions.Partitions)\n\n\tfor partID := uint64(0); partID < c.PartitionCount; partID++ {\n\t\tprimaryOwner := primary.PartitionByID(partID).Owner()\n\t\tpart := backup.PartitionByID(partID)\n\t\tfor _, owner := range part.Owners() {\n\t\t\tif primaryOwner.CompareByID(owner) {\n\t\t\t\treturn fmt.Errorf(\"%s is the primary and backup owner of partID: %d at the same time\", primaryOwner, partID)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc TestBalance_Empty_Backup_Move(t *testing.T) {\n\tcluster 
:= newMockCluster(t)\n\tdefer cluster.shutdown()\n\n\tc1 := testutil.NewConfig()\n\tc1.ReplicaCount = 2\n\te1 := newTestEnvironment(c1)\n\tb1 := cluster.addNode(e1)\n\n\tb1.rt.UpdateEagerly()\n\n\terr := checkBackupOwnership(e1)\n\trequire.NoError(t, err)\n\n\tc2 := testutil.NewConfig()\n\tc2.ReplicaCount = 2\n\te2 := newTestEnvironment(c2)\n\tb2 := cluster.addNode(e2)\n\n\terr = testutil.TryWithInterval(10, 100*time.Millisecond, func() error {\n\t\tif !b2.rt.IsBootstrapped() {\n\t\t\treturn errors.New(\"the second node cannot be bootstrapped\")\n\t\t}\n\t\treturn nil\n\t})\n\trequire.NoError(t, err)\n\n\tb1.rt.UpdateEagerly()\n\n\terr = checkBackupOwnership(e2)\n\trequire.NoError(t, err)\n}\n\nfunc TestBalance_Backup_Move(t *testing.T) {\n\tcluster := newMockCluster(t)\n\tdefer cluster.shutdown()\n\n\tc1 := testutil.NewConfig()\n\tc1.ReplicaCount = 2\n\te1 := newTestEnvironment(c1)\n\tb1 := cluster.addNode(e1)\n\n\tfragments := make(map[uint64]*mockfragment.MockFragment)\n\n\tc := e1.Get(\"config\").(*config.Config)\n\tpart := e1.Get(strings.ToLower(partitions.BACKUP.String())).(*partitions.Partitions)\n\tfor partID := uint64(0); partID < c.PartitionCount; partID++ {\n\t\tpart := part.PartitionByID(partID)\n\t\ts := mockfragment.New()\n\t\ts.Fill()\n\t\tpart.Map().Store(\"dmap.test-data\", s)\n\t\tfragments[partID] = s\n\t}\n\n\tc2 := testutil.NewConfig()\n\tc2.ReplicaCount = 2\n\te2 := newTestEnvironment(c2)\n\tb2 := cluster.addNode(e2)\n\n\terr := testutil.TryWithInterval(10, 100*time.Millisecond, func() error {\n\t\tif !b2.rt.IsBootstrapped() {\n\t\t\treturn errors.New(\"the second node cannot be bootstrapped\")\n\t\t}\n\t\treturn nil\n\t})\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < 5; i++ {\n\t\tb1.rt.UpdateEagerly()\n\t\terr = checkBackupOwnership(e2)\n\t\trequire.NoError(t, err)\n\t}\n\n\tfor partID, f := range fragments {\n\t\tresult := f.Result()\n\t\tif len(result) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\trequire.Len(t, result, 1)\n\t\trequire.NotNil(t, 
result[partitions.BACKUP])\n\t\tr := result[partitions.BACKUP]\n\t\trequire.NotNil(t, r[partID])\n\t\trequire.Equal(t, \"test-data\", r[partID].Name)\n\t\trequire.Equal(t, []discovery.Member{b2.rt.This()}, r[partID].Owners)\n\t}\n}\n"
  },
  {
    "path": "internal/cluster/partitions/fragment.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage partitions\n\nimport (\n\t\"github.com/olric-data/olric/internal/discovery\"\n\t\"github.com/olric-data/olric/pkg/storage\"\n)\n\ntype Fragment interface {\n\tName() string\n\tStats() storage.Stats\n\tMove(*Partition, string, []discovery.Member) error\n\tCompaction() (bool, error)\n\tDestroy() error\n\tClose() error\n}\n"
  },
  {
    "path": "internal/cluster/partitions/hkey.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage partitions\n\nimport (\n\t\"sync\"\n\t\"unsafe\"\n\n\t\"github.com/olric-data/olric/hasher\"\n)\n\nvar (\n\thashFunc hasher.Hasher\n\tonce     sync.Once\n)\n\nfunc SetHashFunc(h hasher.Hasher) {\n\tonce.Do(func() {\n\t\thashFunc = h\n\t})\n}\n\nfunc HKey(name, key string) uint64 {\n\ttmp := name + key\n\treturn hashFunc.Sum64(*(*[]byte)(unsafe.Pointer(&tmp)))\n}\n"
  },
  {
    "path": "internal/cluster/partitions/hkey_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage partitions\n\nimport (\n\t\"testing\"\n\n\t\"github.com/olric-data/olric/hasher\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestPartitions_HKey(t *testing.T) {\n\tSetHashFunc(hasher.NewDefaultHasher())\n\thkey := HKey(\"storage-unit-name\", \"some-key\")\n\trequire.NotEqualf(t, 0, hkey, \"HKey is zero. This shouldn't be normal\")\n}\n"
  },
  {
    "path": "internal/cluster/partitions/partition.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage partitions\n\nimport (\n\t\"sync\"\n\t\"sync/atomic\"\n\n\t\"github.com/olric-data/olric/internal/discovery\"\n)\n\n// Partition is a basic, logical storage unit in Olric and stores DMaps in a sync.Map.\ntype Partition struct {\n\tsync.RWMutex\n\n\tid     uint64\n\tkind   Kind\n\tm      *sync.Map\n\towners atomic.Value\n}\n\nfunc (p *Partition) Kind() Kind {\n\treturn p.kind\n}\n\nfunc (p *Partition) ID() uint64 {\n\treturn p.id\n}\n\nfunc (p *Partition) Map() *sync.Map {\n\treturn p.m\n}\n\n// Owner returns partition Owner. It's not thread-safe.\nfunc (p *Partition) Owner() discovery.Member {\n\tif p.Kind() == BACKUP {\n\t\t// programming error. 
it cannot occur at production!\n\t\tpanic(\"cannot call this if backup is true\")\n\t}\n\towners := p.owners.Load().([]discovery.Member)\n\tif len(owners) == 0 {\n\t\tpanic(\"owners list cannot be empty\")\n\t}\n\treturn owners[len(owners)-1]\n}\n\n// OwnerCount returns the current Owner count of a partition.\nfunc (p *Partition) OwnerCount() int {\n\towners := p.owners.Load()\n\tif owners == nil {\n\t\treturn 0\n\t}\n\treturn len(owners.([]discovery.Member))\n}\n\n// Owners loads the partition owners from atomic.Value and returns.\nfunc (p *Partition) Owners() []discovery.Member {\n\towners := p.owners.Load()\n\tif owners == nil {\n\t\treturn []discovery.Member{}\n\t}\n\treturn owners.([]discovery.Member)\n}\n\nfunc (p *Partition) SetOwners(owners []discovery.Member) {\n\tp.owners.Store(owners)\n}\n\nfunc (p *Partition) Length() int {\n\tvar length int\n\tp.Map().Range(func(_, tmp interface{}) bool {\n\t\tu := tmp.(Fragment)\n\t\tlength += u.Stats().Length\n\t\t// Continue scanning.\n\t\treturn true\n\t})\n\treturn length\n}\n"
  },
  {
    "path": "internal/cluster/partitions/partition_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage partitions\n\nimport (\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com/olric-data/olric/internal/discovery\"\n\t\"github.com/olric-data/olric/pkg/storage\"\n\t\"github.com/stretchr/testify/require\"\n)\n\ntype testFragment struct {\n\tlength int\n}\n\nfunc (tf *testFragment) Stats() storage.Stats {\n\treturn storage.Stats{Length: tf.length}\n}\n\nfunc (tf *testFragment) Name() string {\n\treturn \"test-data-structure\"\n}\n\nfunc (tf *testFragment) Move(_ *Partition, _ string, _ []discovery.Member) error {\n\treturn nil\n}\n\nfunc (tf *testFragment) Close() error {\n\treturn nil\n}\n\nfunc (tf *testFragment) Destroy() error {\n\treturn nil\n}\n\nfunc (tf *testFragment) Compaction() (bool, error) {\n\treturn false, nil\n}\n\nfunc TestPartition(t *testing.T) {\n\tp := Partition{\n\t\tid:   1,\n\t\tkind: PRIMARY,\n\t\tm:    &sync.Map{},\n\t}\n\n\ttmp := []discovery.Member{{\n\t\tName: \"test-member\",\n\t}}\n\tp.SetOwners(tmp)\n\n\tt.Run(\"Owners\", func(t *testing.T) {\n\t\towners := p.Owners()\n\t\trequire.Equal(t, tmp, owners, \"Partition owners slice is different\")\n\t})\n\n\tt.Run(\"Owner\", func(t *testing.T) {\n\t\towner := p.Owner()\n\t\trequire.Equal(t, tmp[0], owner, \"Partition owners slice is different\")\n\t})\n\n\tt.Run(\"OwnerCount\", func(t *testing.T) {\n\t\tcount := p.OwnerCount()\n\t\trequire.Equal(t, 1, 
count)\n\t})\n\n\tt.Run(\"Length\", func(t *testing.T) {\n\t\ts1 := &testFragment{length: 10}\n\t\ts2 := &testFragment{length: 20}\n\t\tp.Map().Store(\"s1\", s1)\n\t\tp.Map().Store(\"s2\", s2)\n\t\tlength := p.Length()\n\t\trequire.Equal(t, 30, length)\n\t})\n}\n"
  },
  {
    "path": "internal/cluster/partitions/partitions.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage partitions\n\nimport (\n\t\"sync\"\n\n\t\"github.com/olric-data/olric/internal/discovery\"\n)\n\ntype Kind int\n\nfunc (k Kind) String() string {\n\tswitch {\n\tcase k == PRIMARY:\n\t\treturn \"Primary\"\n\tcase k == BACKUP:\n\t\treturn \"Backup\"\n\tdefault:\n\t\treturn \"Unknown\"\n\t}\n}\n\nconst (\n\tPRIMARY = Kind(iota + 1)\n\tBACKUP\n)\n\ntype Partitions struct {\n\tcount uint64\n\tkind  Kind\n\tm     map[uint64]*Partition\n}\n\nfunc New(count uint64, kind Kind) *Partitions {\n\tps := &Partitions{\n\t\tkind:  kind,\n\t\tcount: count,\n\t\tm:     make(map[uint64]*Partition),\n\t}\n\tfor i := uint64(0); i < count; i++ {\n\t\tps.m[i] = &Partition{\n\t\t\tid:   i,\n\t\t\tkind: kind,\n\t\t\tm:    &sync.Map{},\n\t\t}\n\t}\n\treturn ps\n}\n\n// PartitionByID returns the partition for the given HKey\nfunc (ps *Partitions) PartitionByID(partID uint64) *Partition {\n\treturn ps.m[partID]\n}\n\n// PartitionIDByHKey returns partition ID for a given HKey.\nfunc (ps *Partitions) PartitionIDByHKey(hkey uint64) uint64 {\n\treturn hkey % ps.count\n}\n\n// PartitionByHKey returns the partition for the given HKey\nfunc (ps *Partitions) PartitionByHKey(hkey uint64) *Partition {\n\tpartID := ps.PartitionIDByHKey(hkey)\n\treturn ps.m[partID]\n}\n\n// PartitionOwnersByHKey loads the partition owners list for a given hkey.\nfunc (ps *Partitions) 
PartitionOwnersByHKey(hkey uint64) []discovery.Member {\n\tpart := ps.PartitionByHKey(hkey)\n\treturn part.owners.Load().([]discovery.Member)\n}\n\n// PartitionOwnersByID loads the partition owners list for a given hkey.\nfunc (ps *Partitions) PartitionOwnersByID(partID uint64) []discovery.Member {\n\tpart := ps.PartitionByID(partID)\n\treturn part.owners.Load().([]discovery.Member)\n}\n"
  },
  {
    "path": "internal/cluster/partitions/partitions_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage partitions\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/olric-data/olric/internal/discovery\"\n)\n\nfunc TestPartitions(t *testing.T) {\n\tvar partitionCount uint64 = 271\n\tps := New(partitionCount, PRIMARY)\n\n\tt.Run(\"PartitionById\", func(t *testing.T) {\n\t\tfor partID := uint64(0); partID < partitionCount; partID++ {\n\t\t\tpart := ps.PartitionByID(partID)\n\t\t\tif part.ID() != partID {\n\t\t\t\tt.Fatalf(\"Expected PartID: %d. Got: %d\", partID, part.ID())\n\t\t\t}\n\t\t\tif part.Kind() != PRIMARY {\n\t\t\t\tt.Fatalf(\"Expected Kind: %s. Got: %s\", PRIMARY, part.Kind())\n\t\t\t}\n\t\t}\n\t})\n\n\tt.Run(\"PartitionIdByHKey\", func(t *testing.T) {\n\t\t// 1 % 271 = 1\n\t\tpartID := ps.PartitionIDByHKey(1)\n\t\tif partID != 1 {\n\t\t\tt.Fatalf(\"Expected PartID: 1. Got: %d\", partID)\n\t\t}\n\t})\n\n\tt.Run(\"PartitionByHKey\", func(t *testing.T) {\n\t\t// 1 % 271 = 1\n\t\tpart := ps.PartitionByHKey(1)\n\t\tif part.ID() != 1 {\n\t\t\tt.Fatalf(\"Expected PartID: 1. 
Got: %d\", part.ID())\n\t\t}\n\t})\n\n\tt.Run(\"PartitionOwnersByHKey\", func(t *testing.T) {\n\t\tpart := ps.PartitionByHKey(1)\n\t\ttmp := []discovery.Member{{\n\t\t\tName: \"test-member\",\n\t\t}}\n\t\tpart.SetOwners(tmp)\n\t\towners := ps.PartitionOwnersByHKey(1)\n\t\tif !reflect.DeepEqual(owners, tmp) {\n\t\t\tt.Fatalf(\"Partition owners slice is different\")\n\t\t}\n\t})\n\n\tt.Run(\"PartitionOwnersById\", func(t *testing.T) {\n\t\tpart := ps.PartitionByID(1)\n\t\ttmp := []discovery.Member{{\n\t\t\tName: \"test-member\",\n\t\t}}\n\t\tpart.SetOwners(tmp)\n\t\towners := ps.PartitionOwnersByID(1)\n\t\tif !reflect.DeepEqual(owners, tmp) {\n\t\t\tt.Fatalf(\"Partition owners slice is different\")\n\t\t}\n\t})\n\n\tt.Run(\"Kind as string\", func(t *testing.T) {\n\t\t// 1 % 271 = 1\n\t\tpart := ps.PartitionByHKey(1)\n\n\t\tif part.Kind().String() != PRIMARY.String() {\n\t\t\tt.Fatalf(\"Expected partition kind: %s. Got: %d\", PRIMARY, part.Kind())\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "internal/cluster/routingtable/callback.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage routingtable\n\nfunc (r *RoutingTable) AddCallback(f func()) {\n\tr.callbackMtx.Lock()\n\tdefer r.callbackMtx.Unlock()\n\n\tr.callbacks = append(r.callbacks, f)\n}\n\nfunc (r *RoutingTable) runCallbacks() {\n\tdefer r.wg.Done()\n\n\tr.callbackMtx.Lock()\n\tdefer r.callbackMtx.Unlock()\n\n\tfor _, f := range r.callbacks {\n\t\tselect {\n\t\tcase <-r.ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\t\tf()\n\t}\n}\n"
  },
  {
    "path": "internal/cluster/routingtable/callback_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage routingtable\n\nimport (\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/testutil\"\n)\n\nfunc TestRoutingTable_Callback(t *testing.T) {\n\tc := testutil.NewConfig()\n\trt := newRoutingTableForTest(c, testutil.NewServer(c))\n\tvar num int32\n\tincrease := func() {\n\t\tatomic.AddInt32(&num, 1)\n\t}\n\trt.AddCallback(increase)\n\trt.wg.Add(1)\n\tgo rt.runCallbacks()\n\t<-time.After(100 * time.Millisecond)\n\tmodified := atomic.LoadInt32(&num)\n\tif modified != 1 {\n\t\tt.Fatalf(\"Expected number: 1. Got: %v\", modified)\n\t}\n}\n"
  },
  {
    "path": "internal/cluster/routingtable/discovery.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage routingtable\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"time\"\n)\n\nvar (\n\tErrServerGone   = errors.New(\"server is gone\")\n\tErrNotJoinedYet = errors.New(\"not joined yet\")\n\tErrClusterJoin  = errors.New(\"cannot join the cluster\")\n\t// ErrOperationTimeout is returned when an operation times out.\n\tErrOperationTimeout = errors.New(\"operation timeout\")\n)\n\n// bootstrapCoordinator prepares the very first routing table and bootstraps the coordinator node.\nfunc (r *RoutingTable) bootstrapCoordinator() error {\n\tr.Lock()\n\tdefer r.Unlock()\n\n\tr.fillRoutingTable()\n\t_, err := r.updateRoutingTableOnCluster()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// The coordinator bootstraps itself.\n\tr.markBootstrapped()\n\tr.log.V(2).Printf(\"[INFO] The cluster coordinator has been bootstrapped\")\n\treturn nil\n}\n\nfunc (r *RoutingTable) attemptToJoin() error {\n\tattempts := 0\n\tfor attempts < r.config.MaxJoinAttempts {\n\t\tselect {\n\t\tcase <-r.ctx.Done():\n\t\t\t// The node is gone.\n\t\t\treturn ErrServerGone\n\t\tdefault:\n\t\t}\n\n\t\tattempts++\n\t\tn, err := r.discovery.Join()\n\t\tif err == nil {\n\t\t\tr.log.V(2).Printf(\"[INFO] Join completed. 
Synced with %d initial nodes\", n)\n\t\t\treturn nil\n\t\t}\n\n\t\tr.log.V(2).Printf(\"[ERROR] Join attempt returned error: %s\", err)\n\t\tif r.IsBootstrapped() {\n\t\t\tr.log.V(2).Printf(\"[INFO] Bootstrapped by the cluster coordinator\")\n\t\t\treturn nil\n\t\t}\n\n\t\tr.log.V(2).Printf(\"[INFO] Awaits for %s to join again (%d/%d)\",\n\t\t\tr.config.JoinRetryInterval, attempts, r.config.MaxJoinAttempts)\n\t\t<-time.After(r.config.JoinRetryInterval)\n\t}\n\treturn ErrClusterJoin\n}\n\nfunc (r *RoutingTable) tryWithInterval(ctx context.Context, interval time.Duration, f func() error) error {\n\tticker := time.NewTicker(interval)\n\tdefer ticker.Stop()\n\n\tvar funcErr error\n\n\tfuncErr = f()\n\tif funcErr == nil {\n\t\t// Done. No need to try with interval\n\t\treturn nil\n\t}\n\nloop:\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\t// context is done\n\t\t\terr := ctx.Err()\n\t\t\tif errors.Is(err, context.DeadlineExceeded) {\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t\tif errors.Is(err, context.Canceled) {\n\t\t\t\treturn ErrServerGone\n\t\t\t}\n\t\t\treturn err\n\t\tcase <-ticker.C:\n\t\t\tfuncErr = f()\n\t\t\tif funcErr == nil {\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t}\n\t}\n\treturn funcErr\n}\n"
  },
  {
    "path": "internal/cluster/routingtable/discovery_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage routingtable\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/testutil\"\n)\n\nfunc TestRoutingTable_tryWithInterval(t *testing.T) {\n\tc := testutil.NewConfig()\n\tsrv := testutil.NewServer(c)\n\trt := newRoutingTableForTest(c, srv)\n\n\tvar foobarError = errors.New(\"foobar\")\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)\n\tdefer cancel()\n\terr := rt.tryWithInterval(ctx, time.Millisecond, func() error {\n\t\treturn foobarError\n\t})\n\n\tif err != foobarError {\n\t\tt.Fatalf(\"Expected foobarError. Got: %v\", foobarError)\n\t}\n}\n\nfunc TestRoutingTable_attemptToJoin(t *testing.T) {\n\tc := testutil.NewConfig()\n\tc.MaxJoinAttempts = 3\n\tc.JoinRetryInterval = 100 * time.Millisecond\n\tc.Peers = []string{\"127.0.0.1:0\"} // An invalid peer\n\tsrv := testutil.NewServer(c)\n\trt := newRoutingTableForTest(c, srv)\n\n\terr := rt.discovery.Start()\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\tdefer func() {\n\t\terr = rt.discovery.Shutdown()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t\t}\n\t}()\n\n\terr = rt.attemptToJoin()\n\tif err != ErrClusterJoin {\n\t\tt.Fatalf(\"Expected ErrClusterJoin. Got: %v\", err)\n\t}\n}\n"
  },
  {
    "path": "internal/cluster/routingtable/distribute.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage routingtable\n\nimport (\n\t\"errors\"\n\n\t\"github.com/buraksezer/consistent\"\n\t\"github.com/olric-data/olric/internal/discovery\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n)\n\nfunc (r *RoutingTable) distributePrimaryCopies(partID uint64) []discovery.Member {\n\t// First you need to create a copy of the owners list. Don't modify the current list.\n\tpart := r.primary.PartitionByID(partID)\n\towners := make([]discovery.Member, part.OwnerCount())\n\tcopy(owners, part.Owners())\n\n\t// Find the new partition owner.\n\tnewOwner := r.consistent.GetPartitionOwner(int(partID))\n\n\t// First run.\n\tif len(owners) == 0 {\n\t\towners = append(owners, newOwner.(discovery.Member))\n\t\treturn owners\n\t}\n\n\t// Prune dead nodes\n\tfor i := 0; i < len(owners); i++ {\n\t\towner := owners[i]\n\t\tcurrent, err := r.discovery.FindMemberByName(owner.Name)\n\t\tif err != nil {\n\t\t\tr.log.V(6).Printf(\"[DEBUG] Failed to find %s in the cluster: %v\", owner, err)\n\t\t\towners = append(owners[:i], owners[i+1:]...)\n\t\t\ti--\n\t\t\tr.log.V(3).Printf(\"[INFO] Member: %s has been deleted from the primary owners list of PartID: %v\", owner.String(), partID)\n\t\t\tcontinue\n\t\t}\n\t\tif !owner.CompareByID(current) {\n\t\t\tr.log.V(3).Printf(\"[WARN] One of the partitions owners is probably re-joined: %s\", current)\n\t\t\towners = 
append(owners[:i], owners[i+1:]...)\n\t\t\ti--\n\t\t\tcontinue\n\t\t}\n\t}\n\n\t// Prune empty nodes\n\tfor i := 0; i < len(owners); i++ {\n\t\towner := owners[i]\n\t\tcmd := protocol.NewLengthOfPart(partID).Command(r.ctx)\n\t\trc := r.client.Get(owner.String())\n\t\terr := rc.Process(r.ctx, cmd)\n\t\tif err != nil {\n\t\t\tr.log.V(6).Printf(\"[DEBUG] Failed to check key count on backup \"+\n\t\t\t\t\"partition: %d: %v\", partID, err)\n\t\t\t// Pass it. If the node is down, memberlist package will send a leave event.\n\t\t\tcontinue\n\t\t}\n\n\t\tcount, err := cmd.Result()\n\t\tif err != nil {\n\t\t\tr.log.V(6).Printf(\"[DEBUG] Failed to check key count on backup \"+\n\t\t\t\t\"partition: %d: %v\", partID, err)\n\t\t\t// Pass it. If the node is down, memberlist package will send a leave event.\n\t\t\tcontinue\n\t\t}\n\n\t\tif count == 0 {\n\t\t\t// Empty partition. Delete it from ownership list.\n\t\t\towners = append(owners[:i], owners[i+1:]...)\n\t\t\ti--\n\t\t}\n\t}\n\n\t// Here add the new partition newOwner.\n\tfor i, owner := range owners {\n\t\tif owner.CompareByID(newOwner.(discovery.Member)) {\n\t\t\t// Remove it from the current position\n\t\t\towners = append(owners[:i], owners[i+1:]...)\n\t\t\t// Append it again to head\n\t\t\treturn append(owners, newOwner.(discovery.Member))\n\t\t}\n\t}\n\treturn append(owners, newOwner.(discovery.Member))\n}\n\nfunc (r *RoutingTable) getReplicaOwners(partID uint64) ([]consistent.Member, error) {\n\tfor i := r.config.ReplicaCount; i > 0; i-- {\n\t\tnewOwners, err := r.consistent.GetClosestNForPartition(int(partID), i)\n\t\tif errors.Is(err, consistent.ErrInsufficientMemberCount) {\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\t// Fail early\n\t\t\treturn nil, err\n\t\t}\n\t\treturn newOwners, nil\n\t}\n\treturn nil, consistent.ErrInsufficientMemberCount\n}\n\nfunc isOwner(member discovery.Member, owners []consistent.Member) bool {\n\tfor _, owner := range owners {\n\t\tif member.Name == owner.String() 
{\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (r *RoutingTable) distributeBackups(partID uint64) []discovery.Member {\n\tpart := r.backup.PartitionByID(partID)\n\towners := make([]discovery.Member, part.OwnerCount())\n\tcopy(owners, part.Owners())\n\n\tnewOwners, err := r.getReplicaOwners(partID)\n\tif err != nil {\n\t\tr.log.V(3).Printf(\"[ERROR] Failed to get replica owners for PartID: %d: %v\",\n\t\t\tpartID, err)\n\t\treturn nil\n\t}\n\n\t// Remove the primary owner\n\tnewOwners = newOwners[1:]\n\n\t// First run\n\tif len(owners) == 0 {\n\t\tfor _, owner := range newOwners {\n\t\t\towners = append(owners, owner.(discovery.Member))\n\t\t}\n\t\treturn owners\n\t}\n\n\t// Prune dead nodes\n\tfor i := 0; i < len(owners); i++ {\n\t\tbackup := owners[i]\n\t\tcur, err := r.discovery.FindMemberByName(backup.Name)\n\t\tif err != nil {\n\t\t\tr.log.V(6).Printf(\"[DEBUG] Failed to find %s in the cluster: %v\", backup, err)\n\t\t\t// Delete it.\n\t\t\towners = append(owners[:i], owners[i+1:]...)\n\t\t\ti--\n\t\t\tr.log.V(6).Printf(\"[INFO] Member: %s has been deleted from the backup owners list of PartID: %v\", backup.String(), partID)\n\t\t\tcontinue\n\t\t}\n\t\tif !backup.CompareByID(cur) {\n\t\t\tr.log.V(3).Printf(\"[WARN] One of the backup owners is probably re-joined: %s\", cur)\n\t\t\t// Delete it.\n\t\t\towners = append(owners[:i], owners[i+1:]...)\n\t\t\ti--\n\t\t\tcontinue\n\t\t}\n\t}\n\n\t// Prune empty nodes\n\tfor i := 0; i < len(owners); i++ {\n\t\tbackup := owners[i]\n\t\tcmd := protocol.NewLengthOfPart(partID).SetReplica().Command(r.ctx)\n\t\trc := r.client.Get(backup.String())\n\t\terr := rc.Process(r.ctx, cmd)\n\t\tif err != nil {\n\t\t\tr.log.V(6).Printf(\"[DEBUG] Failed to check key count on backup \"+\n\t\t\t\t\"partition: %d: %v\", partID, err)\n\t\t\t// Pass it. 
If the node is down, memberlist package will send a leave event.\n\t\t\tcontinue\n\t\t}\n\t\tcount, err := cmd.Result()\n\t\tif err != nil {\n\t\t\tr.log.V(6).Printf(\"[DEBUG] Failed to check key count on backup \"+\n\t\t\t\t\"partition: %d: %v\", partID, err)\n\t\t\t// Pass it. If the node is down, memberlist package will send a leave event.\n\t\t\tcontinue\n\t\t}\n\n\t\tif count != 0 {\n\t\t\t// About this scenario:\n\t\t\t//\n\t\t\t// * ReplicaCount = 3\n\t\t\t// * Create three nodes and insert some keys\n\t\t\t// * Kill one of the nodes\n\t\t\t// * Now we have replicas that it's impossible to transfer its ownership\n\t\t\t// * Since we cannot drop a healthy replica, we prefer to keep it until\n\t\t\t//   a new node joined. Then, we transfer the ownership safely.\n\t\t\t// * During this incident, a node owns a primary and backup replicas at the same time.\n\t\t\tif !isOwner(backup, newOwners) {\n\t\t\t\tr.log.V(3).Printf(\"[WARN] %s hosts primary and replica copies \"+\n\t\t\t\t\t\"for PartID: %d\", backup, partID)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t// Empty node, delete it.\n\t\towners = append(owners[:i], owners[i+1:]...)\n\t\ti--\n\t}\n\n\t// Here add the new backup owners.\n\tfor _, newOwner := range newOwners {\n\t\tvar exists bool\n\t\tfor i, owner := range owners {\n\t\t\tif owner.CompareByID(newOwner.(discovery.Member)) {\n\t\t\t\texists = true\n\t\t\t\t// Remove it from the current position\n\t\t\t\towners = append(owners[:i], owners[i+1:]...)\n\t\t\t\t// Append it again to head\n\t\t\t\towners = append(owners, newOwner.(discovery.Member))\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !exists {\n\t\t\towners = append(owners, newOwner.(discovery.Member))\n\t\t}\n\t}\n\treturn owners\n}\n"
  },
  {
    "path": "internal/cluster/routingtable/distribute_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage routingtable\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/testutil\"\n)\n\nfunc TestRoutingTable_distributedBackups(t *testing.T) {\n\tcluster := newTestCluster()\n\tdefer cluster.cancel()\n\n\tc1 := testutil.NewConfig()\n\tc1.ReplicaCount = 2\n\trt1, err := cluster.addNode(c1)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\tif !rt1.IsBootstrapped() {\n\t\tt.Fatalf(\"The coordinator node cannot be bootstrapped\")\n\t}\n\n\tc2 := testutil.NewConfig()\n\tc2.ReplicaCount = 2\n\trt2, err := cluster.addNode(c2)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\terr = testutil.TryWithInterval(10, 100*time.Millisecond, func() error {\n\t\tif !rt2.IsBootstrapped() {\n\t\t\treturn errors.New(\"the second node cannot be bootstrapped\")\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\terr = rt1.Shutdown(context.Background())\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\tc3 := testutil.NewConfig()\n\tc3.ReplicaCount = 2\n\trt3, err := cluster.addNode(c3)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. 
Got: %v\", err)\n\t}\n\n\trt2.UpdateEagerly()\n\n\tfor partID := uint64(0); partID < c3.PartitionCount; partID++ {\n\t\tpart := rt3.backup.PartitionByID(partID)\n\t\tif part.OwnerCount() != 1 {\n\t\t\tt.Fatalf(\"Expected backup owners count: 1. Got: %d\", part.OwnerCount())\n\t\t}\n\n\t\tfor _, owner := range part.Owners() {\n\t\t\tif owner.CompareByID(rt1.This()) {\n\t\t\t\tt.Fatalf(\"Dead node still a replica owner: %v\", rt1.This())\n\t\t\t}\n\t\t}\n\t}\n\n\terr = cluster.shutdown()\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n}\n"
  },
  {
    "path": "internal/cluster/routingtable/events.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage routingtable\n\nimport (\n\t\"time\"\n\n\t\"github.com/olric-data/olric/events\"\n\t\"github.com/olric-data/olric/internal/discovery\"\n)\n\nfunc (r *RoutingTable) publishNodeJoinEvent(m *discovery.Member) {\n\tdefer r.wg.Done()\n\n\trc := r.client.Get(r.this.String())\n\tmessage := events.NodeJoinEvent{\n\t\tKind:      events.KindNodeJoinEvent,\n\t\tSource:    r.this.String(),\n\t\tNodeJoin:  m.String(),\n\t\tTimestamp: time.Now().UnixNano(),\n\t}\n\tdata, err := message.Encode()\n\tif err != nil {\n\t\tr.log.V(3).Printf(\"[ERROR] Failed to encode NodeJoinEvent: %v\", err)\n\t\treturn\n\t}\n\terr = rc.Publish(r.ctx, events.ClusterEventsChannel, data).Err()\n\tif err != nil {\n\t\tr.log.V(3).Printf(\"[ERROR] Failed to publish NodeJoinEvent to %s: %v\", events.ClusterEventsChannel, err)\n\t}\n}\n\nfunc (r *RoutingTable) publishNodeLeftEvent(m *discovery.Member) {\n\tdefer r.wg.Done()\n\n\trc := r.client.Get(r.this.String())\n\tmessage := events.NodeLeftEvent{\n\t\tKind:      events.KindNodeLeftEvent,\n\t\tSource:    r.this.String(),\n\t\tNodeLeft:  m.String(),\n\t\tTimestamp: time.Now().UnixNano(),\n\t}\n\tdata, err := message.Encode()\n\tif err != nil {\n\t\tr.log.V(3).Printf(\"[ERROR] Failed to encode NodeLeftEvent: %v\", err)\n\t\treturn\n\t}\n\terr = rc.Publish(r.ctx, events.ClusterEventsChannel, data).Err()\n\tif err != nil 
{\n\t\tr.log.V(3).Printf(\"[ERROR] Failed to publish NodeLeftEvent to %s: %v\", events.ClusterEventsChannel, err)\n\t}\n}\n"
  },
  {
    "path": "internal/cluster/routingtable/events_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage routingtable\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/events\"\n\t\"github.com/olric-data/olric/internal/discovery\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/olric-data/olric/internal/testutil\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/tidwall/redcon\"\n)\n\nfunc TestRoutingTable_publishNodeJoinEvent(t *testing.T) {\n\tcluster := newTestCluster()\n\tdefer cluster.cancel()\n\n\tc := testutil.NewConfig()\n\trt, err := cluster.addNode(c)\n\trequire.NoError(t, err)\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\trt.server.ServeMux().HandleFunc(protocol.PubSub.Publish, func(conn redcon.Conn, cmd redcon.Command) {\n\t\tdefer cancel()\n\n\t\tpublishCmd, err := protocol.ParsePublishCommand(cmd)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, events.ClusterEventsChannel, publishCmd.Channel)\n\n\t\tv := events.NodeJoinEvent{}\n\t\terr = json.Unmarshal([]byte(publishCmd.Message), &v)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, events.KindNodeJoinEvent, v.Kind)\n\t\trequire.Equal(t, rt.this.String(), v.Source)\n\t\trequire.Equal(t, rt.this.String(), v.NodeJoin)\n\n\t\tconn.WriteInt(1)\n\t})\n\n\tm := discovery.NewMember(c)\n\trt.wg.Add(1)\n\tgo 
rt.publishNodeJoinEvent(&m)\n\t<-ctx.Done()\n\trequire.ErrorIs(t, context.Canceled, ctx.Err())\n}\n\nfunc TestRoutingTable_publishNodeLeftEvent(t *testing.T) {\n\tcluster := newTestCluster()\n\tdefer cluster.cancel()\n\n\tc := testutil.NewConfig()\n\trt, err := cluster.addNode(c)\n\trequire.NoError(t, err)\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\trt.server.ServeMux().HandleFunc(protocol.PubSub.Publish, func(conn redcon.Conn, cmd redcon.Command) {\n\t\tdefer cancel()\n\t\tpublishCmd, err := protocol.ParsePublishCommand(cmd)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, events.ClusterEventsChannel, publishCmd.Channel)\n\n\t\tv := events.NodeLeftEvent{}\n\t\terr = json.Unmarshal([]byte(publishCmd.Message), &v)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, events.KindNodeLeftEvent, v.Kind)\n\t\trequire.Equal(t, rt.this.String(), v.Source)\n\t\trequire.Equal(t, rt.this.String(), v.NodeLeft)\n\n\t\tconn.WriteInt(1)\n\t})\n\n\tm := discovery.NewMember(c)\n\trt.wg.Add(1)\n\tgo rt.publishNodeLeftEvent(&m)\n\t<-ctx.Done()\n\trequire.ErrorIs(t, context.Canceled, ctx.Err())\n}\n"
  },
  {
    "path": "internal/cluster/routingtable/handlers.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage routingtable\n\nimport (\n\t\"github.com/olric-data/olric/internal/protocol\"\n)\n\nfunc (r *RoutingTable) RegisterHandlers() {\n\tr.server.ServeMux().HandleFunc(protocol.Internal.UpdateRouting, r.updateRoutingCommandHandler)\n\tr.server.ServeMux().HandleFunc(protocol.Internal.LengthOfPart, r.lengthOfPartCommandHandler)\n}\n"
  },
  {
    "path": "internal/cluster/routingtable/left_over_data.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage routingtable\n\nimport (\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/discovery\"\n)\n\nfunc (r *RoutingTable) processLeftOverDataReports(reports map[discovery.Member]*leftOverDataReport) {\n\tcheck := func(member discovery.Member, owners []discovery.Member) bool {\n\t\tfor _, owner := range owners {\n\t\t\tif member.CompareByID(owner) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\tensureOwnership := func(member discovery.Member, partID uint64, part *partitions.Partition) {\n\t\towners := part.Owners()\n\t\tif check(member, owners) {\n\t\t\treturn\n\t\t}\n\t\t// This section is protected by routingMtx against parallel writers.\n\t\t//\n\t\t// Copy owners and append the member to head\n\t\tnewOwners := make([]discovery.Member, len(owners))\n\t\tcopy(newOwners, owners)\n\t\t// Prepend\n\t\tnewOwners = append([]discovery.Member{member}, newOwners...)\n\t\tpart.SetOwners(newOwners)\n\t\tr.log.V(2).Printf(\"[INFO] %s still have some data for PartID (kind: %s): %d\", member, part.Kind(), partID)\n\t}\n\n\t// data structures in this function is guarded by routingMtx\n\tfor member, report := range reports {\n\t\tfor _, partID := range report.Partitions {\n\t\t\tpart := r.primary.PartitionByID(partID)\n\t\t\tensureOwnership(member, partID, part)\n\t\t}\n\n\t\tfor _, 
partID := range report.Backups {\n\t\t\tpart := r.backup.PartitionByID(partID)\n\t\t\tensureOwnership(member, partID, part)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "internal/cluster/routingtable/left_over_data_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage routingtable\n\nimport (\n\t\"errors\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/testutil\"\n\t\"github.com/olric-data/olric/internal/testutil/mockfragment\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestRoutingTable_LeftOverData(t *testing.T) {\n\tcluster := newTestCluster()\n\tdefer cluster.cancel()\n\n\tc1 := testutil.NewConfig()\n\trt1, err := cluster.addNode(c1)\n\trequire.NoError(t, err)\n\n\tif !rt1.IsBootstrapped() {\n\t\tt.Fatalf(\"The coordinator node cannot be bootstrapped\")\n\t}\n\n\tfor partID := uint64(0); partID < c1.PartitionCount; partID++ {\n\t\tpart := rt1.primary.PartitionByID(partID)\n\t\tts := mockfragment.New()\n\t\tts.Fill()\n\t\tpart.Map().Store(\"test-data\", ts)\n\t}\n\n\tc2 := testutil.NewConfig()\n\trt2, err := cluster.addNode(c2)\n\trequire.NoError(t, err)\n\n\terr = testutil.TryWithInterval(10, 100*time.Millisecond, func() error {\n\t\tif !rt2.IsBootstrapped() {\n\t\t\treturn errors.New(\"the second node cannot be bootstrapped\")\n\t\t}\n\t\treturn nil\n\t})\n\trequire.NoError(t, err)\n\n\tfor partID := uint64(0); partID < c2.PartitionCount; partID++ {\n\t\tpart := rt2.primary.PartitionByID(partID)\n\t\tts := mockfragment.New()\n\t\tts.Fill()\n\t\tpart.Map().Store(\"test-data\", ts)\n\t}\n\n\trt1.UpdateEagerly()\n\n\tfor partID := uint64(0); partID < 
c1.PartitionCount; partID++ {\n\t\tpart := rt1.primary.PartitionByID(partID)\n\t\tif len(part.Owners()) != 2 {\n\t\t\tt.Fatalf(\"Expected partition owners count: 2. Got: %d, PartID: %d\", part.OwnerCount(), partID)\n\t\t}\n\t}\n\n}\n"
  },
  {
    "path": "internal/cluster/routingtable/members.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage routingtable\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\n\t\"github.com/olric-data/olric/internal/discovery\"\n)\n\ntype Members struct {\n\tsync.RWMutex\n\tm map[uint64]discovery.Member\n}\n\nfunc newMembers() *Members {\n\treturn &Members{\n\t\tm: map[uint64]discovery.Member{},\n\t}\n}\n\nfunc (m *Members) Add(member discovery.Member) {\n\tm.m[member.ID] = member\n}\n\nfunc (m *Members) Get(id uint64) (discovery.Member, error) {\n\tmember, ok := m.m[id]\n\tif !ok {\n\t\treturn discovery.Member{}, fmt.Errorf(\"member not found with id: %d\", id)\n\t}\n\treturn member, nil\n}\n\nfunc (m *Members) Delete(id uint64) {\n\tdelete(m.m, id)\n}\n\nfunc (m *Members) DeleteByName(other discovery.Member) {\n\tfor id, member := range m.m {\n\t\tif member.CompareByName(other) {\n\t\t\tdelete(m.m, id)\n\t\t}\n\t}\n}\n\nfunc (m *Members) Length() int {\n\treturn len(m.m)\n}\n\nfunc (m *Members) Range(f func(id uint64, member discovery.Member) bool) {\n\tfor id, member := range m.m {\n\t\tif !f(id, member) {\n\t\t\tbreak\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "internal/cluster/routingtable/members_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage routingtable\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/olric-data/olric/internal/discovery\"\n)\n\nfunc TestMembers_Get(t *testing.T) {\n\tm := newMembers()\n\tmember := discovery.Member{\n\t\tName: \"localhost:3320\",\n\t\tID:   6054057,\n\t}\n\tm.Add(member)\n\n\tr, err := m.Get(member.ID)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\tif !reflect.DeepEqual(r, member) {\n\t\tt.Fatalf(\"Retrived member is different\")\n\t}\n}\nfunc TestMembers_Delete(t *testing.T) {\n\tm := newMembers()\n\tmember := discovery.Member{\n\t\tName: \"localhost:3320\",\n\t\tID:   6054057,\n\t}\n\tm.Add(member)\n\tm.Delete(member.ID)\n\t_, err := m.Get(member.ID)\n\tif err == nil {\n\t\tt.Fatalf(\"Expected and error. Got: %v\", err)\n\t}\n}\n\nfunc TestMembers_DeleteByName(t *testing.T) {\n\tm := newMembers()\n\tmember := discovery.Member{\n\t\tName: \"localhost:3320\",\n\t\tID:   6054057,\n\t}\n\tm.Add(member)\n\tm.DeleteByName(member)\n\t_, err := m.Get(member.ID)\n\tif err == nil {\n\t\tt.Fatalf(\"Expected and error. Got: %v\", err)\n\t}\n}\n\nfunc TestMembers_Length(t *testing.T) {\n\tm := newMembers()\n\tmember := discovery.Member{\n\t\tName: \"localhost:3320\",\n\t\tID:   6054057,\n\t}\n\tm.Add(member)\n\tif m.Length() != 1 {\n\t\tt.Fatalf(\"Expected length: 1. 
Got: %d\", m.Length())\n\t}\n}\n\nfunc TestMembers_Range(t *testing.T) {\n\tm := newMembers()\n\tmember := discovery.Member{\n\t\tName: \"localhost:3320\",\n\t\tID:   6054057,\n\t}\n\tm.Add(member)\n\tm.Range(func(id uint64, m discovery.Member) bool {\n\t\tif id != member.ID {\n\t\t\tt.Fatalf(\"Expected id: %d. Got: %d\", member.ID, id)\n\t\t}\n\n\t\tif member.Name != m.Name {\n\t\t\tt.Fatalf(\"Expected Name: %s. Got: %s\", member.Name, m.Name)\n\t\t}\n\t\treturn true\n\t})\n}\n"
  },
  {
    "path": "internal/cluster/routingtable/operations.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage routingtable\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/cespare/xxhash/v2\"\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/tidwall/redcon\"\n\t\"github.com/vmihailenco/msgpack/v5\"\n)\n\nfunc (r *RoutingTable) lengthOfPartCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\t// The command handlers of the routing table service should wait for the cluster join event.\n\t<-r.joined\n\n\tlengthOfPartCmd, err := protocol.ParseLengthOfPartCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tvar part *partitions.Partition\n\tif lengthOfPartCmd.Replica {\n\t\tpart = r.backup.PartitionByID(lengthOfPartCmd.PartID)\n\t} else {\n\t\tpart = r.primary.PartitionByID(lengthOfPartCmd.PartID)\n\t}\n\n\tconn.WriteInt(part.Length())\n}\n\nfunc (r *RoutingTable) verifyRoutingTable(id uint64, table map[uint64]*route) error {\n\t// Check the coordinator\n\tcoordinator, err := r.discovery.FindMemberByID(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmyCoordinator := r.discovery.GetCoordinator()\n\tif !coordinator.CompareByID(myCoordinator) {\n\t\treturn fmt.Errorf(\"unrecognized cluster coordinator: %s: %s\", coordinator, myCoordinator)\n\t}\n\n\t// Compare partition counts to catch a possible inconsistencies in configuration\n\tif 
r.config.PartitionCount != uint64(len(table)) {\n\t\treturn fmt.Errorf(\"invalid partition count: %d\", len(table))\n\t}\n\treturn nil\n}\n\nfunc (r *RoutingTable) updateRoutingCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\t// The command handlers of the routing table service should wait for the cluster join event.\n\t<-r.joined\n\n\tr.updateRoutingMtx.Lock()\n\tdefer r.updateRoutingMtx.Unlock()\n\n\tupdateRoutingCmd, err := protocol.ParseUpdateRoutingCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\ttable := make(map[uint64]*route)\n\terr = msgpack.Unmarshal(updateRoutingCmd.Payload, &table)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\t// Log this event\n\tcoordinator, err := r.discovery.FindMemberByID(updateRoutingCmd.CoordinatorID)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\tr.log.V(3).Printf(\"[INFO] Routing table has been pushed by %s\", coordinator)\n\n\tif err = r.verifyRoutingTable(updateRoutingCmd.CoordinatorID, table); err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\t// owners(atomic.value) is guarded by routingUpdateMtx against parallel writers.\n\t// Calculate routing signature. 
This is useful to control balancing tasks.\n\tr.setSignature(xxhash.Sum64(updateRoutingCmd.Payload))\n\tfor partID, data := range table {\n\t\t// Set partition(primary copies) owners\n\t\tpart := r.primary.PartitionByID(partID)\n\t\tpart.SetOwners(data.Owners)\n\n\t\t// Set backup owners\n\t\tbpart := r.backup.PartitionByID(partID)\n\t\tbpart.SetOwners(data.Backups)\n\t}\n\n\t// Used by the LRU implementation.\n\tr.setOwnedPartitionCount()\n\n\t// Bootstrapped by the coordinator.\n\tr.markBootstrapped()\n\n\t// Collect report\n\tvalue, err := r.prepareLeftOverDataReport()\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\t// Call balancer to distribute load evenly\n\tr.wg.Add(1)\n\tgo r.runCallbacks()\n\tconn.WriteBulk(value)\n}\n"
  },
  {
    "path": "internal/cluster/routingtable/routingtable.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage routingtable\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/protocol\"\n\n\t\"github.com/buraksezer/consistent\"\n\t\"github.com/hashicorp/memberlist\"\n\t\"github.com/olric-data/olric/config\"\n\t\"github.com/olric-data/olric/internal/checkpoint\"\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/discovery\"\n\t\"github.com/olric-data/olric/internal/environment\"\n\t\"github.com/olric-data/olric/internal/server\"\n\t\"github.com/olric-data/olric/internal/service\"\n\t\"github.com/olric-data/olric/pkg/flog\"\n)\n\n// ErrClusterQuorum means that the cluster could not reach a healthy numbers of members to operate.\nvar ErrClusterQuorum = errors.New(\"cannot be reached cluster quorum to operate\")\n\ntype route struct {\n\tOwners  []discovery.Member\n\tBackups []discovery.Member\n}\n\ntype RoutingTable struct {\n\tsync.RWMutex // routingMtx\n\n\t// Currently owned partition count. 
Approximate LRU implementation\n\t// uses that.\n\townedPartitionCount uint64\n\tsignature           uint64\n\t// numMembers is used to check cluster quorum.\n\tnumMembers int32\n\n\t// These values is useful to control operation status.\n\tbootstrapped int32\n\n\tupdateRoutingMtx sync.Mutex\n\ttable            map[uint64]*route\n\tconsistent       *consistent.Consistent\n\tthis             discovery.Member\n\tmembers          *Members\n\tconfig           *config.Config\n\tlog              *flog.Logger\n\tprimary          *partitions.Partitions\n\tbackup           *partitions.Partitions\n\tclient           *server.Client\n\tserver           *server.Server\n\tdiscovery        *discovery.Discovery\n\tcallbacks        []func()\n\tcallbackMtx      sync.Mutex\n\tpushPeriod       time.Duration\n\t// The command handlers of the routing table service should wait for the cluster join event.\n\tjoined chan struct{}\n\tctx    context.Context\n\tcancel context.CancelFunc\n\twg     sync.WaitGroup\n}\n\nfunc registerErrors() {\n\tprotocol.SetError(\"CLUSTERQUORUM\", ErrClusterQuorum)\n\tprotocol.SetError(\"CLUSTERJOIN\", ErrClusterJoin)\n\tprotocol.SetError(\"SERVERGONE\", ErrServerGone)\n\tprotocol.SetError(\"OPERATIONTIMEOUT\", ErrOperationTimeout)\n}\n\nfunc New(e *environment.Environment) *RoutingTable {\n\t// The routing table has to be started properly before accepting connections.\n\tcheckpoint.Add()\n\tc := e.Get(\"config\").(*config.Config)\n\tlog := e.Get(\"logger\").(*flog.Logger)\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tcc := consistent.Config{\n\t\tHasher:            c.Hasher,\n\t\tPartitionCount:    int(c.PartitionCount),\n\t\tReplicationFactor: 20, // TODO: This also may be a configuration param.\n\t\tLoad:              c.LoadFactor,\n\t}\n\n\trt := &RoutingTable{\n\t\tmembers:    newMembers(),\n\t\tdiscovery:  discovery.New(log, c),\n\t\tconfig:     c,\n\t\tlog:        log,\n\t\tconsistent: consistent.New(nil, cc),\n\t\tprimary:    
e.Get(\"primary\").(*partitions.Partitions),\n\t\tbackup:     e.Get(\"backup\").(*partitions.Partitions),\n\t\tclient:     e.Get(\"client\").(*server.Client),\n\t\tserver:     e.Get(\"server\").(*server.Server),\n\t\tpushPeriod: c.RoutingTablePushInterval,\n\t\tjoined:     make(chan struct{}),\n\t\tctx:        ctx,\n\t\tcancel:     cancel,\n\t}\n\tregisterErrors()\n\trt.RegisterHandlers()\n\treturn rt\n}\n\nfunc (r *RoutingTable) Discovery() *discovery.Discovery {\n\treturn r.discovery\n}\n\nfunc (r *RoutingTable) This() discovery.Member {\n\treturn r.this\n}\n\n// setNumMembers assigns the current number of members in the cluster to a variable.\nfunc (r *RoutingTable) setNumMembers() {\n\t// Calling NumMembers in every request is quite expensive.\n\t// It's rarely updated. Just call this when the membership info changed.\n\tnr := int32(r.discovery.NumMembers())\n\tatomic.StoreInt32(&r.numMembers, nr)\n}\n\nfunc (r *RoutingTable) SetNumMembersEagerly(nr int32) {\n\tatomic.StoreInt32(&r.numMembers, nr)\n}\n\nfunc (r *RoutingTable) NumMembers() int32 {\n\treturn atomic.LoadInt32(&r.numMembers)\n}\n\nfunc (r *RoutingTable) Members() *Members {\n\treturn r.members\n}\n\nfunc (r *RoutingTable) setSignature(s uint64) {\n\tatomic.StoreUint64(&r.signature, s)\n}\n\nfunc (r *RoutingTable) Signature() uint64 {\n\treturn atomic.LoadUint64(&r.signature)\n}\n\nfunc (r *RoutingTable) setOwnedPartitionCount() {\n\tvar count uint64\n\tfor partID := uint64(0); partID < r.config.PartitionCount; partID++ {\n\t\tpart := r.primary.PartitionByID(partID)\n\t\tif part.Owner().CompareByID(r.this) {\n\t\t\tcount++\n\t\t}\n\t}\n\tatomic.StoreUint64(&r.ownedPartitionCount, count)\n}\n\nfunc (r *RoutingTable) OwnedPartitionCount() uint64 {\n\treturn atomic.LoadUint64(&r.ownedPartitionCount)\n}\n\nfunc (r *RoutingTable) CheckMemberCountQuorum() error {\n\t// This type of quorum function determines the presence of quorum based on the count of members in the cluster,\n\t// as observed by the 
local member’s cluster membership manager\n\tif r.config.MemberCountQuorum > r.NumMembers() {\n\t\treturn ErrClusterQuorum\n\t}\n\treturn nil\n}\n\nfunc (r *RoutingTable) markBootstrapped() {\n\t// Bootstrapped by the coordinator.\n\tatomic.StoreInt32(&r.bootstrapped, 1)\n}\n\nfunc (r *RoutingTable) IsBootstrapped() bool {\n\t// Bootstrapped by the coordinator.\n\treturn atomic.LoadInt32(&r.bootstrapped) == 1\n}\n\n// CheckBootstrap is called for every request and checks whether the node is bootstrapped.\n// It has to be very fast for a smooth operation.\nfunc (r *RoutingTable) CheckBootstrap() error {\n\t// Prevent creating expensive structures for every request,\n\t// Just check an integer value atomically.\n\tif r.IsBootstrapped() {\n\t\treturn nil\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), r.config.BootstrapTimeout)\n\tdefer cancel()\n\treturn r.tryWithInterval(ctx, 100*time.Millisecond, func() error {\n\t\tif r.IsBootstrapped() {\n\t\t\treturn nil\n\t\t}\n\t\t// Final error\n\t\treturn ErrOperationTimeout\n\t})\n}\n\nfunc (r *RoutingTable) fillRoutingTable() {\n\tif r.config.ReplicaCount > int(r.NumMembers()) {\n\t\tr.log.V(1).Printf(\"[WARN] Desired replica count is %d and \"+\n\t\t\t\"the cluster has %d members currently\",\n\t\t\tr.config.ReplicaCount, r.NumMembers())\n\t}\n\ttable := make(map[uint64]*route)\n\tfor partID := uint64(0); partID < r.config.PartitionCount; partID++ {\n\t\trt := &route{\n\t\t\tOwners: r.distributePrimaryCopies(partID),\n\t\t}\n\t\tif r.config.ReplicaCount > config.MinimumReplicaCount {\n\t\t\trt.Backups = r.distributeBackups(partID)\n\t\t}\n\t\ttable[partID] = rt\n\t}\n\tr.table = table\n}\n\nfunc (r *RoutingTable) UpdateEagerly() {\n\tr.updateRouting()\n}\n\nfunc (r *RoutingTable) updateRouting() {\n\t// This function is called by listenMemberlistEvents and updateRoutingPeriodically\n\t// So this lock prevents parallel execution.\n\tr.Lock()\n\tdefer r.Unlock()\n\n\t// This function is only run by the 
cluster coordinator.\n\tif !r.discovery.IsCoordinator() {\n\t\treturn\n\t}\n\n\t// This type of quorum function determines the presence of quorum based on the count of members in the cluster,\n\t// as observed by the local member’s cluster membership manager\n\tif err := r.CheckMemberCountQuorum(); err != nil {\n\t\tr.log.V(2).Printf(\"[ERROR] Impossible to calculate and update routing table: %v\", err)\n\t\treturn\n\t}\n\n\tr.fillRoutingTable()\n\treports, err := r.updateRoutingTableOnCluster()\n\tif err != nil {\n\t\tr.log.V(2).Printf(\"[ERROR] Failed to update routing table on cluster: %v\", err)\n\t\treturn\n\t}\n\tr.processLeftOverDataReports(reports)\n}\n\nfunc (r *RoutingTable) processClusterEvent(event *discovery.ClusterEvent) {\n\tr.Members().Lock()\n\tdefer r.Members().Unlock()\n\n\tmember, _ := discovery.NewMemberFromMetadata(event.NodeMeta)\n\n\tswitch event.Event {\n\tcase memberlist.NodeJoin:\n\t\tr.Members().Add(member)\n\t\tr.consistent.Add(member)\n\t\tr.log.V(2).Printf(\"[INFO] Node joined: %s\", member)\n\n\t\tif r.config.EnableClusterEventsChannel {\n\t\t\tr.wg.Add(1)\n\t\t\tgo r.publishNodeJoinEvent(&member)\n\t\t}\n\tcase memberlist.NodeLeave:\n\t\tif _, err := r.Members().Get(member.ID); err != nil {\n\t\t\tr.log.V(2).Printf(\"[ERROR] Unknown node left: %s: %d\", event.NodeName, member.ID)\n\t\t\treturn\n\t\t}\n\t\tr.Members().Delete(member.ID)\n\t\tr.consistent.Remove(event.NodeName)\n\t\t// Don't try to used closed sockets again.\n\t\tr.log.V(2).Printf(\"[INFO] Node left: %s\", event.NodeName)\n\t\tif err := r.client.Close(event.NodeName); err != nil {\n\t\t\tr.log.V(2).Printf(\"[ERROR] Failed to remove the node from pool %s: %v\", event.NodeName, err)\n\t\t}\n\n\t\tif r.config.EnableClusterEventsChannel {\n\t\t\tr.wg.Add(1)\n\t\t\tgo r.publishNodeLeftEvent(&member)\n\t\t}\n\tcase memberlist.NodeUpdate:\n\t\t// Node's birthdate may be changed. 
Close the pool and re-add to the hash ring.\n\t\t// This takes linear time, but member count should be too small for a decent computer!\n\t\tr.Members().Range(func(id uint64, item discovery.Member) bool {\n\t\t\tif member.CompareByName(item) {\n\t\t\t\tr.Members().Delete(id)\n\t\t\t\tr.consistent.Remove(event.NodeName)\n\t\t\t\tif err := r.client.Close(event.NodeName); err != nil {\n\t\t\t\t\tr.log.V(2).Printf(\"[ERROR] Failed to remove the node from pool %s: %v\", event.NodeName, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn true\n\t\t})\n\t\tr.Members().Add(member)\n\t\tr.consistent.Add(member)\n\t\tr.log.V(2).Printf(\"[INFO] Node updated: %s\", member)\n\tdefault:\n\t\tr.log.V(2).Printf(\"[ERROR] Unknown event received: %v\", event)\n\t\treturn\n\t}\n\n\t// Store the current number of members in the member list.\n\t// We need this to implement a simple split-brain protection algorithm.\n\tr.setNumMembers()\n}\n\nfunc (r *RoutingTable) listenClusterEvents(eventCh chan *discovery.ClusterEvent) {\n\tdefer r.wg.Done()\n\tfor {\n\t\tselect {\n\t\tcase <-r.ctx.Done():\n\t\t\treturn\n\t\tcase e := <-eventCh:\n\t\t\tr.processClusterEvent(e)\n\t\t\tr.updateRouting()\n\t\t}\n\t}\n}\n\nfunc (r *RoutingTable) pushPeriodically() {\n\tdefer r.wg.Done()\n\n\tticker := time.NewTicker(r.pushPeriod)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-r.ctx.Done():\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\tr.updateRouting()\n\t\t}\n\t}\n}\n\nfunc (r *RoutingTable) Join() error {\n\terr := r.discovery.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = r.attemptToJoin()\n\tif errors.Is(err, ErrClusterJoin) {\n\t\tr.log.V(1).Printf(\"[INFO] Forming a new Olric cluster\")\n\t\terr = nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tthis, err := r.discovery.FindMemberByName(r.config.MemberlistConfig.Name)\n\tif err != nil {\n\t\tr.log.V(2).Printf(\"[ERROR] Failed to get this node in cluster: %v\", err)\n\t\tshutdownError := r.discovery.Shutdown()\n\t\tif shutdownError != 
nil {\n\t\t\treturn shutdownError\n\t\t}\n\t\treturn err\n\t}\n\tr.this = this\n\tclose(r.joined)\n\treturn nil\n}\n\nfunc (r *RoutingTable) Start() error {\n\tselect {\n\tcase <-r.joined:\n\t\t// It's time to start the routing table service. Otherwise, this method will return an error.\n\tdefault:\n\t\t// Not yet, or the join process has failed\n\t\treturn ErrNotJoinedYet\n\t}\n\n\t// Store the current number of members in the member list.\n\t// We need this to implement a simple split-brain protection algorithm.\n\tr.setNumMembers()\n\n\tr.wg.Add(1)\n\tgo r.listenClusterEvents(r.discovery.ClusterEvents)\n\n\t// 1 Hour\n\tctx, cancel := context.WithTimeout(r.ctx, time.Hour)\n\tdefer cancel()\n\terr := r.tryWithInterval(ctx, time.Second, func() error {\n\t\t// Check member count quorum now. If there are not enough peers to work, wait forever.\n\t\terr := r.CheckMemberCountQuorum()\n\t\tif err != nil {\n\t\t\tr.log.V(2).Printf(\"[ERROR] Inoperable node: %v\", err)\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr.Members().Lock()\n\tr.Members().Add(r.this)\n\tr.Members().Unlock()\n\n\tr.consistent.Add(r.this)\n\n\tif r.discovery.IsCoordinator() {\n\t\terr = r.bootstrapCoordinator()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tr.wg.Add(1)\n\tgo r.pushPeriodically()\n\n\tif r.config.MemberlistInterface != \"\" {\n\t\tr.log.V(2).Printf(\"[INFO] Memberlist uses interface: %s\", r.config.MemberlistInterface)\n\t}\n\tr.log.V(2).Printf(\"[INFO] Memberlist bindAddr: %s, bindPort: %d\", r.config.MemberlistConfig.BindAddr, r.config.MemberlistConfig.BindPort)\n\tr.log.V(2).Printf(\"[INFO] Cluster coordinator: %s\", r.discovery.GetCoordinator())\n\tcheckpoint.Pass()\n\treturn nil\n}\n\nfunc (r *RoutingTable) Shutdown(ctx context.Context) error {\n\tselect {\n\tcase <-r.ctx.Done():\n\t\t// already closed\n\t\treturn nil\n\tdefault:\n\t}\n\n\tif err := r.discovery.Shutdown(); err != nil {\n\t\treturn err\n\t}\n\n\tr.cancel()\n\tdone := 
make(chan struct{})\n\tgo func() {\n\t\tr.wg.Wait()\n\t\tclose(done)\n\t}()\n\tselect {\n\tcase <-ctx.Done():\n\t\terr := ctx.Err()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase <-done:\n\t}\n\treturn nil\n}\n\nvar _ service.Service = (*RoutingTable)(nil)\n"
  },
  {
    "path": "internal/cluster/routingtable/routingtable_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage routingtable\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/hashicorp/memberlist\"\n\t\"github.com/olric-data/olric/config\"\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/discovery\"\n\t\"github.com/olric-data/olric/internal/environment\"\n\t\"github.com/olric-data/olric/internal/server\"\n\t\"github.com/olric-data/olric/internal/testutil\"\n\t\"golang.org/x/sync/errgroup\"\n)\n\nfunc newRoutingTableForTest(c *config.Config, srv *server.Server) *RoutingTable {\n\te := environment.New()\n\te.Set(\"config\", c)\n\te.Set(\"logger\", testutil.NewFlogger(c))\n\te.Set(\"primary\", partitions.New(c.PartitionCount, partitions.PRIMARY))\n\te.Set(\"backup\", partitions.New(c.PartitionCount, partitions.BACKUP))\n\te.Set(\"client\", server.NewClient(c.Client))\n\te.Set(\"server\", srv)\n\n\trt := New(e)\n\tgo func() {\n\t\terr := srv.ListenAndServe()\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"ListenAndServe returned an error: %v\", err))\n\t\t}\n\t}()\n\t<-srv.StartedCtx.Done()\n\n\treturn rt\n}\n\ntype testCluster struct {\n\tpeerPorts []int\n\terrGr     errgroup.Group\n\tctx       context.Context\n\tcancel    context.CancelFunc\n}\n\nfunc newTestCluster() *testCluster {\n\tctx, cancel := 
context.WithCancel(context.Background())\n\treturn &testCluster{\n\t\tctx:    ctx,\n\t\tcancel: cancel,\n\t}\n}\n\nfunc (t *testCluster) addNode(c *config.Config) (*RoutingTable, error) {\n\tif c == nil {\n\t\tc = testutil.NewConfig()\n\t}\n\tport, err := testutil.GetFreePort()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.MemberlistConfig.BindPort = port\n\n\tvar peers []string\n\tfor _, peerPort := range t.peerPorts {\n\t\tpeers = append(peers, net.JoinHostPort(\"127.0.0.1\", strconv.Itoa(peerPort)))\n\t}\n\tc.Peers = peers\n\n\tsrv := testutil.NewServer(c)\n\trt := newRoutingTableForTest(c, srv)\n\terr = rt.Join()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = rt.Start()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tt.errGr.Go(func() error {\n\t\t<-t.ctx.Done()\n\t\treturn srv.Shutdown(context.Background())\n\t})\n\n\tt.errGr.Go(func() error {\n\t\t<-t.ctx.Done()\n\t\treturn rt.Shutdown(context.Background())\n\t})\n\n\tt.peerPorts = append(t.peerPorts, port)\n\treturn rt, err\n}\n\nfunc (t *testCluster) shutdown() error {\n\tt.cancel()\n\treturn t.errGr.Wait()\n}\n\nfunc TestRoutingTable_SingleNode(t *testing.T) {\n\tcluster := newTestCluster()\n\tdefer cluster.cancel()\n\n\tc := testutil.NewConfig()\n\trt, err := cluster.addNode(c)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\tif !rt.This().CompareByID(rt.Discovery().GetCoordinator()) {\n\t\tt.Fatalf(\"Coordinator is different\")\n\t}\n\n\tif !rt.IsBootstrapped() {\n\t\tt.Fatalf(\"The coordinator node cannot be bootstrapped\")\n\t}\n\n\tfor partID := uint64(0); partID < c.PartitionCount; partID++ {\n\t\tpart := rt.primary.PartitionByID(partID)\n\t\tif !part.Owner().CompareByID(rt.This()) {\n\t\t\tt.Fatalf(\"PartID: %d has a different owner\", partID)\n\t\t}\n\t}\n\n\tif rt.Signature() == 0 {\n\t\tt.Fatalf(\"routingTable.signature is zero\")\n\t}\n\n\tif rt.OwnedPartitionCount() != c.PartitionCount {\n\t\tt.Fatalf(\"Expected owned partition count: %d. 
Got: %d\", c.PartitionCount, rt.OwnedPartitionCount())\n\t}\n}\n\nfunc TestRoutingTable_Cluster(t *testing.T) {\n\tcluster := newTestCluster()\n\tdefer cluster.cancel()\n\n\tc1 := testutil.NewConfig()\n\trt1, err := cluster.addNode(c1)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\tif !rt1.IsBootstrapped() {\n\t\tt.Fatalf(\"The coordinator node cannot be bootstrapped\")\n\t}\n\n\tfirstSignature := rt1.Signature()\n\n\tc2 := testutil.NewConfig()\n\trt2, err := cluster.addNode(c2)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\terr = testutil.TryWithInterval(10, 100*time.Millisecond, func() error {\n\t\tif !rt2.IsBootstrapped() {\n\t\t\treturn errors.New(\"the second node cannot be bootstrapped\")\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\tif !rt2.Discovery().GetCoordinator().CompareByID(rt1.Discovery().GetCoordinator()) {\n\t\tt.Fatalf(\"Coordinator is different\")\n\t}\n\n\tif firstSignature == rt2.Signature() {\n\t\tt.Fatalf(\"routingTable signature did not change after node join\")\n\t}\n\n\tif rt1.OwnedPartitionCount() == c1.PartitionCount {\n\t\tt.Fatalf(\"rt1 has all the partitions\")\n\t}\n\n\tif rt2.OwnedPartitionCount() == c2.PartitionCount {\n\t\tt.Fatalf(\"rt2 has all the partitions\")\n\t}\n\n\ttotalPartitionCount := rt1.OwnedPartitionCount() + rt2.OwnedPartitionCount()\n\tif totalPartitionCount != c1.PartitionCount {\n\t\tt.Fatalf(\"Total partition count is wrong: %d\", totalPartitionCount)\n\t}\n\n\terr = cluster.shutdown()\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n}\n\nfunc TestRoutingTable_CheckPartitionOwnership(t *testing.T) {\n\tcluster := newTestCluster()\n\tdefer cluster.cancel()\n\n\tc1 := testutil.NewConfig()\n\trt1, err := cluster.addNode(c1)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. 
Got: %v\", err)\n\t}\n\n\tif !rt1.IsBootstrapped() {\n\t\tt.Fatalf(\"The coordinator node cannot be bootstrapped\")\n\t}\n\n\tc2 := testutil.NewConfig()\n\trt2, err := cluster.addNode(c2)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\terr = testutil.TryWithInterval(10, 100*time.Millisecond, func() error {\n\t\tif !rt2.IsBootstrapped() {\n\t\t\treturn errors.New(\"the second node cannot be bootstrapped\")\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\tfor partID := uint64(0); partID < c1.PartitionCount; partID++ {\n\t\townerOne := rt1.primary.PartitionByID(partID).Owner()\n\t\townerTwo := rt2.primary.PartitionByID(partID).Owner()\n\t\tif !ownerOne.CompareByID(ownerTwo) {\n\t\t\tt.Fatalf(\"Different partition: %d owner: %s != %s\", partID, ownerOne, ownerTwo)\n\t\t}\n\t}\n\n\terr = cluster.shutdown()\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n}\n\nfunc TestRoutingTable_NodeLeave(t *testing.T) {\n\tcluster := newTestCluster()\n\tdefer cluster.cancel()\n\n\tc1 := testutil.NewConfig()\n\trt1, err := cluster.addNode(c1)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\tif !rt1.IsBootstrapped() {\n\t\tt.Fatalf(\"The coordinator node cannot be bootstrapped\")\n\t}\n\n\tc2 := testutil.NewConfig()\n\trt2, err := cluster.addNode(c2)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\terr = testutil.TryWithInterval(10, 100*time.Millisecond, func() error {\n\t\tif !rt2.IsBootstrapped() {\n\t\t\treturn errors.New(\"the second node cannot be bootstrapped\")\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\tsignatureWithTwoNode := rt1.Signature()\n\terr = rt1.Shutdown(context.Background())\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. 
Got: %v\", err)\n\t}\n\terr = testutil.TryWithInterval(50, 100*time.Millisecond, func() error {\n\t\tif rt2.Signature() == signatureWithTwoNode {\n\t\t\treturn errors.New(\"still has the same signature\")\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\tif !rt2.Discovery().GetCoordinator().CompareByID(rt2.This()) {\n\t\tt.Fatalf(\"Coordinator is different\")\n\t}\n\n\tfor partID := uint64(0); partID < c2.PartitionCount; partID++ {\n\t\tpart := rt2.primary.PartitionByID(partID)\n\t\tif !part.Owner().CompareByID(rt2.This()) {\n\t\t\tt.Fatalf(\"PartID: %d has a different owner\", partID)\n\t\t}\n\t}\n\n\terr = cluster.shutdown()\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n}\n\nfunc TestRoutingTable_NodeUpdate(t *testing.T) {\n\tcluster := newTestCluster()\n\tdefer cluster.cancel()\n\n\tc1 := testutil.NewConfig()\n\trt1, err := cluster.addNode(c1)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\tif !rt1.IsBootstrapped() {\n\t\tt.Fatalf(\"The coordinator node cannot be bootstrapped\")\n\t}\n\n\tc2 := testutil.NewConfig()\n\trt2, err := cluster.addNode(c2)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\terr = testutil.TryWithInterval(10, 100*time.Millisecond, func() error {\n\t\tif !rt2.IsBootstrapped() {\n\t\t\treturn errors.New(\"the second node cannot be bootstrapped\")\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\tn := rt2.Discovery().LocalNode()\n\tmeta, err := discovery.NewMember(c2).Encode()\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. 
Got: %v\", err)\n\t}\n\tn.Meta = meta\n\tevent := memberlist.NodeEvent{Event: memberlist.NodeUpdate, Node: n}\n\trt2.Discovery().ClusterEvents <- discovery.ToClusterEvent(event)\n\n\terr = testutil.TryWithInterval(10, 100*time.Millisecond, func() error {\n\t\t_, err = rt1.Members().Get(rt2.This().ID)\n\t\tif err == nil {\n\t\t\t// node id is updated.\n\t\t\treturn errors.New(\"rt2 could not be updated\")\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\terr = cluster.shutdown()\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n}\n"
  },
  {
    "path": "internal/cluster/routingtable/update.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage routingtable\n\nimport (\n\t\"runtime\"\n\t\"sync\"\n\n\t\"github.com/olric-data/olric/internal/protocol\"\n\n\t\"github.com/olric-data/olric/internal/discovery\"\n\t\"github.com/vmihailenco/msgpack/v5\"\n\t\"golang.org/x/sync/errgroup\"\n\t\"golang.org/x/sync/semaphore\"\n)\n\ntype leftOverDataReport struct {\n\tPartitions []uint64\n\tBackups    []uint64\n}\n\nfunc (r *RoutingTable) prepareLeftOverDataReport() ([]byte, error) {\n\tres := leftOverDataReport{}\n\tfor partID := uint64(0); partID < r.config.PartitionCount; partID++ {\n\t\tpart := r.primary.PartitionByID(partID)\n\t\tif part.Length() != 0 {\n\t\t\tres.Partitions = append(res.Partitions, partID)\n\t\t}\n\n\t\tbackup := r.backup.PartitionByID(partID)\n\t\tif backup.Length() != 0 {\n\t\t\tres.Backups = append(res.Backups, partID)\n\t\t}\n\t}\n\treturn msgpack.Marshal(res)\n}\n\nfunc (r *RoutingTable) updateRoutingTableOnMember(data []byte, member discovery.Member) (*leftOverDataReport, error) {\n\tcmd := protocol.NewUpdateRouting(data, r.this.ID).Command(r.ctx)\n\trc := r.client.Get(member.String())\n\terr := rc.Process(r.ctx, cmd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresult, err := cmd.Bytes()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treport := leftOverDataReport{}\n\terr = msgpack.Unmarshal(result, &report)\n\tif err != nil 
{\n\t\tr.log.V(3).Printf(\"[ERROR] Failed to decode ownership report from %s: %v\", member, err)\n\t\treturn nil, err\n\t}\n\treturn &report, nil\n}\n\nfunc (r *RoutingTable) updateRoutingTableOnCluster() (map[discovery.Member]*leftOverDataReport, error) {\n\tdata, err := msgpack.Marshal(r.table)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar mtx sync.Mutex\n\tvar g errgroup.Group\n\treports := make(map[discovery.Member]*leftOverDataReport)\n\tnum := int64(runtime.NumCPU())\n\tsem := semaphore.NewWeighted(num)\n\n\tr.Members().RLock()\n\tr.Members().Range(func(id uint64, tmp discovery.Member) bool {\n\t\tmember := tmp\n\t\tg.Go(func() error {\n\t\t\tif err := sem.Acquire(r.ctx, 1); err != nil {\n\t\t\t\tr.log.V(3).Printf(\"[ERROR] Failed to acquire semaphore to update routing table on %s: %v\", member, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer sem.Release(1)\n\n\t\t\treport, err := r.updateRoutingTableOnMember(data, member)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tmtx.Lock()\n\t\t\tdefer mtx.Unlock()\n\t\t\treports[member] = report\n\t\t\treturn nil\n\t\t})\n\t\treturn true\n\t})\n\tr.Members().RUnlock()\n\n\tif err := g.Wait(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn reports, nil\n}\n"
  },
  {
    "path": "internal/discovery/delegate.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage discovery\n\n// delegate is a struct which implements memberlist.Delegate interface.\ntype delegate struct {\n\tmeta []byte\n}\n\n// newDelegate returns a new delegate instance.\nfunc (d *Discovery) newDelegate() (delegate, error) {\n\tdata, err := d.member.Encode()\n\tif err != nil {\n\t\treturn delegate{}, err\n\t}\n\treturn delegate{\n\t\tmeta: data,\n\t}, nil\n}\n\n// NodeMeta is used to retrieve meta-data about the current node\n// when broadcasting an alive message. Its length is limited to\n// the given byte size. This metadata is available in the Node structure.\nfunc (d delegate) NodeMeta(limit int) []byte {\n\treturn d.meta\n}\n\n// NotifyMsg is called when a user-data message is received.\nfunc (d delegate) NotifyMsg(data []byte) {}\n\n// GetBroadcasts is called when user data messages can be broadcast.\nfunc (d delegate) GetBroadcasts(overhead, limit int) [][]byte { return nil }\n\n// LocalState is used for a TCP Push/Pull.\nfunc (d delegate) LocalState(join bool) []byte { return nil }\n\n// MergeRemoteState is invoked after a TCP Push/Pull.\nfunc (d delegate) MergeRemoteState(buf []byte, join bool) {}\n"
  },
  {
    "path": "internal/discovery/discovery.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n/*Package discovery provides a basic memberlist integration.*/\npackage discovery\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"plugin\"\n\t\"sort\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/hashicorp/memberlist\"\n\t\"github.com/olric-data/olric/config\"\n\t\"github.com/olric-data/olric/internal/stats\"\n\t\"github.com/olric-data/olric/pkg/flog\"\n\t\"github.com/olric-data/olric/pkg/service_discovery\"\n)\n\nconst eventChanCapacity = 256\n\n// UptimeSeconds is number of seconds since the server started.\nvar UptimeSeconds = stats.NewInt64Counter()\n\n// ErrMemberNotFound indicates that the requested member could not be found in the member list.\nvar ErrMemberNotFound = errors.New(\"member not found\")\n\n// ClusterEvent is a single event related to node activity in the memberlist.\n// The Node member of this struct must not be directly modified.\ntype ClusterEvent struct {\n\tEvent    memberlist.NodeEventType\n\tNodeName string\n\tNodeAddr net.IP\n\tNodePort uint16\n\tNodeMeta []byte // Metadata from the delegate for this node.\n}\n\nfunc (c *ClusterEvent) MemberAddr() string {\n\tport := strconv.Itoa(int(c.NodePort))\n\treturn net.JoinHostPort(c.NodeAddr.String(), port)\n}\n\n// Discovery is a structure that encapsulates memberlist and\n// provides useful functions to utilize it.\ntype Discovery struct 
{\n\tlog        *flog.Logger\n\tmember     *Member\n\tmemberlist *memberlist.Memberlist\n\tconfig     *config.Config\n\n\t// To manage Join/Leave/Update events\n\tclusterEventsMtx sync.RWMutex\n\tClusterEvents    chan *ClusterEvent\n\n\t// Try to reconnect dead members\n\teventSubscribers []chan *ClusterEvent\n\tserviceDiscovery service_discovery.ServiceDiscovery\n\n\t// Flow control\n\twg     sync.WaitGroup\n\tctx    context.Context\n\tcancel context.CancelFunc\n}\n\n// New creates a new memberlist with a proper configuration and returns a new Discovery instance along with it.\nfunc New(log *flog.Logger, c *config.Config) *Discovery {\n\tmember := NewMember(c)\n\tctx, cancel := context.WithCancel(context.Background())\n\td := &Discovery{\n\t\tmember: &member,\n\t\tconfig: c,\n\t\tlog:    log,\n\t\tctx:    ctx,\n\t\tcancel: cancel,\n\t}\n\treturn d\n}\n\nfunc (d *Discovery) loadServiceDiscoveryPlugin() error {\n\tvar sd service_discovery.ServiceDiscovery\n\n\tif val, ok := d.config.ServiceDiscovery[\"plugin\"]; ok {\n\t\tif sd, ok = val.(service_discovery.ServiceDiscovery); !ok {\n\t\t\treturn fmt.Errorf(\"plugin type %T is not a ServiceDiscovery interface\", val)\n\t\t}\n\t} else {\n\t\tpluginPath, ok := d.config.ServiceDiscovery[\"path\"]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"plugin path could not be found\")\n\t\t}\n\t\tplug, err := plugin.Open(pluginPath.(string))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to open plugin: %w\", err)\n\t\t}\n\n\t\tsymDiscovery, err := plug.Lookup(\"ServiceDiscovery\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to lookup serviceDiscovery symbol: %w\", err)\n\t\t}\n\n\t\tif sd, ok = symDiscovery.(service_discovery.ServiceDiscovery); !ok {\n\t\t\treturn fmt.Errorf(\"unable to assert type to serviceDiscovery\")\n\t\t}\n\t}\n\n\tif err := sd.SetConfig(d.config.ServiceDiscovery); err != nil {\n\t\treturn err\n\t}\n\tsd.SetLogger(d.config.Logger)\n\tif err := sd.Initialize(); err != nil {\n\t\treturn 
err\n\t}\n\n\td.serviceDiscovery = sd\n\treturn nil\n}\n\n// increaseUptimeSeconds calls UptimeSeconds.Increase function every second.\nfunc (d *Discovery) increaseUptimeSeconds() {\n\tdefer d.wg.Done()\n\n\tticker := time.NewTicker(time.Second)\n\tdefer ticker.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tUptimeSeconds.Increase(1)\n\t\tcase <-d.ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (d *Discovery) Start() error {\n\tif d.config.ServiceDiscovery != nil {\n\t\tif err := d.loadServiceDiscoveryPlugin(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t// ClusterEvents chan is consumed by the Olric package to maintain a consistent hash ring.\n\td.ClusterEvents = d.SubscribeNodeEvents()\n\n\t// Initialize a new memberlist\n\tdl, err := d.newDelegate()\n\tif err != nil {\n\t\treturn err\n\t}\n\teventsCh := make(chan memberlist.NodeEvent, eventChanCapacity)\n\td.config.MemberlistConfig.Delegate = dl\n\td.config.MemberlistConfig.Logger = d.config.Logger\n\td.config.MemberlistConfig.Events = &memberlist.ChannelEventDelegate{\n\t\tCh: eventsCh,\n\t}\n\tlist, err := memberlist.Create(d.config.MemberlistConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\td.memberlist = list\n\n\tif d.serviceDiscovery != nil {\n\t\tif err := d.serviceDiscovery.Register(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\td.wg.Add(1)\n\tgo d.eventLoop(eventsCh)\n\n\td.wg.Add(1)\n\tgo d.increaseUptimeSeconds()\n\n\treturn nil\n}\n\n// Join is used to take an existing Memberlist and attempt to Join a cluster\n// by contacting all the given hosts and performing a state sync. 
Initially,\n// the Memberlist only contains our own state, so doing this will cause remote\n// nodes to become aware of the existence of this node, effectively joining the cluster.\nfunc (d *Discovery) Join() (int, error) {\n\tif d.serviceDiscovery != nil {\n\t\tpeers, err := d.serviceDiscovery.DiscoverPeers()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn d.memberlist.Join(peers)\n\t}\n\treturn d.memberlist.Join(d.config.Peers)\n}\n\nfunc (d *Discovery) Rejoin(peers []string) (int, error) {\n\treturn d.memberlist.Join(peers)\n}\n\n// GetMembers returns a full list of known alive nodes.\nfunc (d *Discovery) GetMembers() []Member {\n\tvar members []Member\n\tnodes := d.memberlist.Members()\n\tfor _, node := range nodes {\n\t\tmember, _ := NewMemberFromMetadata(node.Meta)\n\t\tmembers = append(members, member)\n\t}\n\n\t// sort members by birthdate\n\tsort.Slice(members, func(i int, j int) bool {\n\t\treturn members[i].Birthdate < members[j].Birthdate\n\t})\n\treturn members\n}\n\nfunc (d *Discovery) NumMembers() int {\n\treturn d.memberlist.NumMembers()\n}\n\n// FindMemberByName finds and returns an alive member.\nfunc (d *Discovery) FindMemberByName(name string) (Member, error) {\n\tmembers := d.GetMembers()\n\tfor _, member := range members {\n\t\tif member.Name == name {\n\t\t\treturn member, nil\n\t\t}\n\t}\n\treturn Member{}, ErrMemberNotFound\n}\n\n// FindMemberByID finds and returns an alive member.\nfunc (d *Discovery) FindMemberByID(id uint64) (Member, error) {\n\tmembers := d.GetMembers()\n\tfor _, member := range members {\n\t\tif member.ID == id {\n\t\t\treturn member, nil\n\t\t}\n\t}\n\treturn Member{}, ErrMemberNotFound\n}\n\n// GetCoordinator returns the oldest node in the memberlist.\nfunc (d *Discovery) GetCoordinator() Member {\n\tmembers := d.GetMembers()\n\tif len(members) == 0 {\n\t\td.log.V(1).Printf(\"[ERROR] There is no member in memberlist\")\n\t\treturn Member{}\n\t}\n\treturn members[0]\n}\n\n// IsCoordinator returns true if 
the caller is the coordinator node.\nfunc (d *Discovery) IsCoordinator() bool {\n\treturn d.GetCoordinator().ID == d.member.ID\n}\n\n// LocalNode is used to return the local Node\nfunc (d *Discovery) LocalNode() *memberlist.Node {\n\treturn d.memberlist.LocalNode()\n}\n\n// Shutdown will stop any background maintenance of network activity\n// for this memberlist, causing it to appear \"dead\". A leave message\n// will not be broadcasted prior, so the cluster being left will have\n// to detect this node's Shutdown using probing. If you wish to more\n// gracefully exit the cluster, call Leave prior to shutting down.\n//\n// This method is safe to call multiple times.\nfunc (d *Discovery) Shutdown() error {\n\tselect {\n\tcase <-d.ctx.Done():\n\t\treturn nil\n\tdefault:\n\t}\n\td.cancel()\n\t// We don't do that in a goroutine with a timeout mechanism\n\t// because this mechanism may cause goroutine leak.\n\td.wg.Wait()\n\n\tif d.memberlist != nil {\n\t\t// Leave will broadcast a leave message but will not shutdown the background\n\t\t// listeners, meaning the node will continue participating in gossip and state\n\t\t// updates.\n\t\td.log.V(2).Printf(\"[INFO] Broadcasting a leave message\")\n\t\tif err := d.memberlist.Leave(d.config.LeaveTimeout); err != nil {\n\t\t\td.log.V(3).Printf(\"[WARN] memberlist.Leave returned an error: %v\", err)\n\t\t}\n\t}\n\n\tif d.serviceDiscovery != nil {\n\t\tdefer func(serviceDiscovery service_discovery.ServiceDiscovery) {\n\t\t\terr := serviceDiscovery.Close()\n\t\t\tif err != nil {\n\t\t\t\td.log.V(3).Printf(\"[ERROR] ServiceDiscovery.Close returned an error: %v\", err)\n\t\t\t}\n\t\t}(d.serviceDiscovery)\n\n\t\tif err := d.serviceDiscovery.Deregister(); err != nil {\n\t\t\td.log.V(3).Printf(\"[ERROR] ServiceDiscovery.Deregister returned an error: %v\", err)\n\t\t}\n\t}\n\n\tif d.memberlist != nil {\n\t\treturn d.memberlist.Shutdown()\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "internal/discovery/discovery_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage discovery\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"strconv\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/hashicorp/memberlist\"\n\t\"github.com/olric-data/olric/pkg/service_discovery\"\n\n\t\"github.com/olric-data/olric/internal/testutil\"\n\t\"github.com/stretchr/testify/require\"\n)\n\ntype testCluster struct {\n\tmtx       sync.RWMutex\n\tinstances []*Discovery\n\tmembers   []string\n}\n\nfunc newTestCluster(t *testing.T) *testCluster {\n\ttc := &testCluster{}\n\tt.Cleanup(func() {\n\t\ttc.mtx.Lock()\n\t\tdefer tc.mtx.Unlock()\n\n\t\tfor _, instance := range tc.instances {\n\t\t\trequire.NoError(t, instance.Shutdown())\n\t\t}\n\t})\n\treturn tc\n}\n\nfunc (tc *testCluster) addNewMember(t *testing.T) *Discovery {\n\ttc.mtx.Lock()\n\tdefer tc.mtx.Unlock()\n\n\tcfg := testutil.NewConfig()\n\tfor _, peer := range tc.members {\n\t\tcfg.Peers = append(cfg.Peers, peer)\n\t}\n\n\tflogger := testutil.NewFlogger(cfg)\n\td := New(flogger, cfg)\n\terr := d.Start()\n\trequire.NoError(t, err)\n\n\t_, err = d.Join()\n\trequire.NoError(t, err)\n\n\ttc.instances = append(tc.instances, d)\n\taddr := net.JoinHostPort(\n\t\td.config.MemberlistConfig.BindAddr,\n\t\tstrconv.Itoa(d.config.MemberlistConfig.BindPort),\n\t)\n\ttc.members = append(tc.members, addr)\n\n\treturn d\n}\n\nfunc TestDiscovery_GetCoordinator(t *testing.T) 
{\n\tc := newTestCluster(t)\n\td1 := c.addNewMember(t)\n\td2 := c.addNewMember(t)\n\n\trequire.Equal(t, d1.GetCoordinator(), d2.GetCoordinator())\n}\n\nfunc TestDiscovery_GetMembers(t *testing.T) {\n\tc := newTestCluster(t)\n\td1 := c.addNewMember(t)\n\tc.addNewMember(t)\n\tc.addNewMember(t)\n\n\trequire.Len(t, d1.GetMembers(), 3)\n}\n\nfunc TestDiscovery_IsCoordinator(t *testing.T) {\n\tc := newTestCluster(t)\n\td1 := c.addNewMember(t)\n\n\t<-time.After(100 * time.Millisecond)\n\td2 := c.addNewMember(t)\n\n\t<-time.After(100 * time.Millisecond)\n\td3 := c.addNewMember(t)\n\n\trequire.True(t, d1.IsCoordinator())\n\trequire.False(t, d2.IsCoordinator())\n\trequire.False(t, d3.IsCoordinator())\n}\n\nfunc TestDiscovery_NumMembers(t *testing.T) {\n\tc := newTestCluster(t)\n\td1 := c.addNewMember(t)\n\tc.addNewMember(t)\n\tc.addNewMember(t)\n\n\trequire.Equal(t, d1.NumMembers(), 3)\n}\n\nfunc TestDiscovery_LocalNode(t *testing.T) {\n\tc := newTestCluster(t)\n\td1 := c.addNewMember(t)\n\n\trequire.Equal(t, d1.LocalNode().Name, d1.config.MemberlistConfig.Name)\n}\n\nfunc TestDiscovery_FindMemberByID(t *testing.T) {\n\tc := newTestCluster(t)\n\tc.addNewMember(t)\n\tc.addNewMember(t)\n\tc.addNewMember(t)\n\n\tfor i, instance := range c.instances {\n\t\tm, err := instance.FindMemberByID(c.instances[i].member.ID)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, m.Name, instance.config.MemberlistConfig.Name)\n\t}\n}\n\nfunc TestDiscovery_FindMemberByName(t *testing.T) {\n\tc := newTestCluster(t)\n\tc.addNewMember(t)\n\tc.addNewMember(t)\n\tc.addNewMember(t)\n\n\tfor i, instance := range c.instances {\n\t\tm, err := instance.FindMemberByName(c.instances[i].member.Name)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, m.Name, instance.config.MemberlistConfig.Name)\n\t}\n}\n\nfunc TestDiscovery_increaseUptimeSeconds(t *testing.T) {\n\tc := newTestCluster(t)\n\tc.addNewMember(t)\n\n\t<-time.After(2 * time.Second)\n\n\trequire.Greater(t, UptimeSeconds.Read(), 
int64(0))\n}\n\ntype dummyServiceDiscovery struct {\n\tmtx sync.Mutex\n\n\tinitialized   bool\n\tclosed        bool\n\tsetLogger     bool\n\tsetConfig     bool\n\tregister      bool\n\tdiscoverPeers bool\n\tderegister    bool\n\tlog           *log.Logger\n}\n\nfunc (d *dummyServiceDiscovery) Initialize() error {\n\td.mtx.Lock()\n\tdefer d.mtx.Unlock()\n\td.initialized = true\n\n\treturn nil\n}\n\nfunc (d *dummyServiceDiscovery) SetConfig(_ map[string]interface{}) error {\n\td.mtx.Lock()\n\tdefer d.mtx.Unlock()\n\td.setConfig = true\n\n\treturn nil\n}\n\nfunc (d *dummyServiceDiscovery) SetLogger(_ *log.Logger) {\n\td.mtx.Lock()\n\tdefer d.mtx.Unlock()\n\n\td.setLogger = true\n}\n\nfunc (d *dummyServiceDiscovery) Register() error {\n\td.mtx.Lock()\n\tdefer d.mtx.Unlock()\n\n\td.register = true\n\n\treturn nil\n}\n\nfunc (d *dummyServiceDiscovery) Deregister() error {\n\td.mtx.Lock()\n\tdefer d.mtx.Unlock()\n\td.deregister = true\n\n\treturn nil\n}\n\nfunc (d *dummyServiceDiscovery) DiscoverPeers() ([]string, error) {\n\td.mtx.Lock()\n\tdefer d.mtx.Unlock()\n\td.discoverPeers = true\n\n\treturn []string{}, nil\n}\n\nfunc (d *dummyServiceDiscovery) Close() error {\n\td.mtx.Lock()\n\tdefer d.mtx.Unlock()\n\td.closed = true\n\n\treturn nil\n}\n\nvar _ service_discovery.ServiceDiscovery = (*dummyServiceDiscovery)(nil)\n\nfunc TestDiscovery_loadServiceDiscoveryPlugin(t *testing.T) {\n\tc := testutil.NewConfig()\n\n\tsd := &dummyServiceDiscovery{}\n\tc.ServiceDiscovery = map[string]interface{}{\n\t\t\"plugin\":   sd,\n\t\t\"provider\": \"dummy\",\n\t\t\"args\":     fmt.Sprintf(\"namespace=%s label_selector=\\\"%s\\\"\", \"foo_namespace\", \"foo_label_selector\"),\n\t}\n\n\tf := testutil.NewFlogger(c)\n\td := New(f, c)\n\terr := d.Start()\n\trequire.NoError(t, err)\n\n\t_, err = d.Join()\n\trequire.NoError(t, err)\n\n\trequire.True(t, sd.initialized)\n\trequire.True(t, sd.setConfig)\n\trequire.True(t, sd.setLogger)\n\trequire.True(t, sd.register)\n\trequire.True(t, 
sd.discoverPeers)\n}\n\nfunc TestDiscovery_ClusterEvents(t *testing.T) {\n\tc := newTestCluster(t)\n\td1 := c.addNewMember(t)\n\td2 := c.addNewMember(t)\n\td3 := c.addNewMember(t)\n\n\tvar members []string\nloop:\n\tfor {\n\t\tselect {\n\t\tcase e := <-d1.ClusterEvents:\n\t\t\trequire.Equal(t, memberlist.NodeJoin, e.Event)\n\t\t\tmembers = append(members, e.MemberAddr())\n\t\t\tif len(members) == 2 {\n\t\t\t\tbreak loop\n\t\t\t}\n\t\tcase <-time.After(2 * time.Second):\n\t\t\tbreak loop\n\t\t}\n\t}\n\n\trequire.Contains(t, members, net.JoinHostPort(d2.config.MemberlistConfig.BindAddr, strconv.Itoa(d2.config.MemberlistConfig.BindPort)))\n\trequire.Contains(t, members, net.JoinHostPort(d3.config.MemberlistConfig.BindAddr, strconv.Itoa(d3.config.MemberlistConfig.BindPort)))\n}\n"
  },
  {
    "path": "internal/discovery/events.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage discovery\n\nimport \"github.com/hashicorp/memberlist\"\n\nfunc ToClusterEvent(e memberlist.NodeEvent) *ClusterEvent {\n\treturn &ClusterEvent{\n\t\tEvent:    e.Event,\n\t\tNodeName: e.Node.Name,\n\t\tNodeAddr: e.Node.Addr,\n\t\tNodePort: e.Node.Port,\n\t\tNodeMeta: e.Node.Meta,\n\t}\n}\n\nfunc (d *Discovery) handleEvent(event memberlist.NodeEvent) {\n\td.clusterEventsMtx.RLock()\n\tdefer d.clusterEventsMtx.RUnlock()\n\n\tfor _, ch := range d.eventSubscribers {\n\t\tif event.Node.Name == d.member.Name {\n\t\t\tcontinue\n\t\t}\n\t\tch <- ToClusterEvent(event)\n\t}\n}\n\n// eventLoop awaits for messages from memberlist and broadcasts them to  event listeners.\nfunc (d *Discovery) eventLoop(eventsCh chan memberlist.NodeEvent) {\n\tdefer d.wg.Done()\n\n\tfor {\n\t\tselect {\n\t\tcase e := <-eventsCh:\n\t\t\td.handleEvent(e)\n\t\tcase <-d.ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (d *Discovery) SubscribeNodeEvents() chan *ClusterEvent {\n\td.clusterEventsMtx.Lock()\n\tdefer d.clusterEventsMtx.Unlock()\n\n\tch := make(chan *ClusterEvent, eventChanCapacity)\n\td.eventSubscribers = append(d.eventSubscribers, ch)\n\treturn ch\n}\n"
  },
  {
    "path": "internal/discovery/member.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage discovery\n\nimport (\n\t\"encoding/binary\"\n\t\"time\"\n\n\t\"github.com/cespare/xxhash/v2\"\n\t\"github.com/olric-data/olric/config\"\n\t\"github.com/vmihailenco/msgpack/v5\"\n)\n\n// Member represents a node in the cluster.\ntype Member struct {\n\tName      string\n\tNameHash  uint64\n\tID        uint64\n\tBirthdate int64\n}\n\n// CompareByID returns true if two members denote the same member in the cluster.\nfunc (m Member) CompareByID(other Member) bool {\n\t// ID variable is calculated by combining member's name and birthdate\n\treturn m.ID == other.ID\n}\n\n// CompareByName returns true if the two members has the same name in the cluster.\n// This function is intended to redirect the requests to the partition owner.\nfunc (m Member) CompareByName(other Member) bool {\n\treturn m.NameHash == other.NameHash\n}\n\nfunc (m Member) String() string {\n\treturn m.Name\n}\n\nfunc (m Member) Encode() ([]byte, error) {\n\treturn msgpack.Marshal(m)\n}\n\nfunc NewMemberFromMetadata(metadata []byte) (Member, error) {\n\tres := &Member{}\n\terr := msgpack.Unmarshal(metadata, res)\n\treturn *res, err\n}\n\nfunc MemberID(name string, birthdate int64) uint64 {\n\t// Calculate member's identity. 
It's useful to compare hosts.\n\tbuf := make([]byte, 8+len(name))\n\tbinary.BigEndian.PutUint64(buf, uint64(birthdate))\n\tbuf = append(buf, []byte(name)...)\n\treturn xxhash.Sum64(buf)\n}\n\nfunc NewMember(c *config.Config) Member {\n\tbirthdate := time.Now().UnixNano()\n\tnameHash := xxhash.Sum64([]byte(c.MemberlistConfig.Name))\n\treturn Member{\n\t\tName:      c.MemberlistConfig.Name,\n\t\tNameHash:  nameHash,\n\t\tID:        MemberID(c.MemberlistConfig.Name, birthdate),\n\t\tBirthdate: birthdate,\n\t}\n}\n"
  },
  {
    "path": "internal/discovery/member_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage discovery\n\nimport (\n\t\"testing\"\n\n\t\"github.com/olric-data/olric/internal/testutil\"\n)\n\nfunc TestMembers(t *testing.T) {\n\tc1 := testutil.NewConfig()\n\tmember1 := NewMember(c1)\n\n\tc2 := testutil.NewConfig()\n\tmember2 := NewMember(c2)\n\n\tt.Run(\"Name\", func(t *testing.T) {\n\t\tif member1.String() != c1.MemberlistConfig.Name {\n\t\t\tt.Fatalf(\"Expected member name: %s. Got: %s\", c1.MemberlistConfig.Name, member1.Name)\n\t\t}\n\t})\n\n\tt.Run(\"CompareByID\", func(t *testing.T) {\n\t\tif !member1.CompareByID(member1) {\n\t\t\tt.Fatalf(\"members were the same\")\n\t\t}\n\n\t\tif member1.CompareByID(member2) {\n\t\t\tt.Fatalf(\"members were different\")\n\t\t}\n\t})\n\n\tt.Run(\"CompareByName\", func(t *testing.T) {\n\t\tif !member1.CompareByName(member1) {\n\t\t\tt.Fatalf(\"members were the same\")\n\t\t}\n\n\t\tif member1.CompareByName(member2) {\n\t\t\tt.Fatalf(\"members were different\")\n\t\t}\n\t})\n\n\tt.Run(\"Encode/Decode\", func(t *testing.T) {\n\t\tdata, err := member1.Encode()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t\t}\n\n\t\tdecoded, err := NewMemberFromMetadata(data)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Expected nil. 
Got: %v\", err)\n\t\t}\n\n\t\tif !member1.CompareByID(decoded) {\n\t\t\tt.Fatalf(\"Decoded member is different\")\n\t\t}\n\n\t\tif !member1.CompareByName(decoded) {\n\t\t\tt.Fatalf(\"Decoded member is different\")\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "internal/dmap/atomic.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/olric-data/olric/internal/resp\"\n\t\"github.com/olric-data/olric/internal/util\"\n\t\"github.com/olric-data/olric/pkg/storage\"\n)\n\nfunc (dm *DMap) loadCurrentAtomicInt(e *env) (int, int64, error) {\n\tentry, err := dm.Get(e.ctx, e.key)\n\tif errors.Is(err, ErrKeyNotFound) {\n\t\treturn 0, 0, nil\n\t}\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tif entry == nil {\n\t\treturn 0, 0, nil\n\t}\n\tnr, err := util.ParseInt(entry.Value(), 10, 64)\n\tif err != nil {\n\t\treturn 0, 0, nil\n\t}\n\treturn int(nr), entry.TTL(), nil\n}\n\nfunc (dm *DMap) atomicIncrDecr(cmd string, e *env, delta int) (int, error) {\n\tatomicKey := e.dmap + e.key\n\tdm.s.locker.Lock(atomicKey)\n\tdefer func() {\n\t\terr := dm.s.locker.Unlock(atomicKey)\n\t\tif err != nil {\n\t\t\tdm.s.log.V(3).Printf(\"[ERROR] Failed to release the fine grained lock for key: %s on DMap: %s: %v\", e.key, e.dmap, err)\n\t\t}\n\t}()\n\n\tcurrent, ttl, err := dm.loadCurrentAtomicInt(e)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar updated int\n\tswitch cmd {\n\tcase protocol.DMap.Incr:\n\t\tupdated = current + delta\n\tcase protocol.DMap.Decr:\n\t\tupdated = current - delta\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"invalid 
operation\")\n\t}\n\n\tvalueBuf := pool.Get()\n\tdefer pool.Put(valueBuf)\n\n\tenc := resp.New(valueBuf)\n\terr = enc.Encode(updated)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\te.value = make([]byte, valueBuf.Len())\n\tcopy(e.value, valueBuf.Bytes())\n\n\tif ttl != 0 {\n\t\te.putConfig.HasPX = true\n\t\te.putConfig.PX = time.Until(time.UnixMilli(ttl))\n\t}\n\terr = dm.put(e)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn updated, nil\n}\n\n// Incr atomically increments key by delta. The return value is the new value after being incremented or an error.\nfunc (dm *DMap) Incr(ctx context.Context, key string, delta int) (int, error) {\n\te := newEnv(ctx)\n\te.dmap = dm.name\n\te.key = key\n\treturn dm.atomicIncrDecr(protocol.DMap.Incr, e, delta)\n}\n\n// Decr atomically decrements key by delta. The return value is the new value after being decremented or an error.\nfunc (dm *DMap) Decr(ctx context.Context, key string, delta int) (int, error) {\n\te := newEnv(ctx)\n\te.dmap = dm.name\n\te.key = key\n\treturn dm.atomicIncrDecr(protocol.DMap.Decr, e, delta)\n}\n\nfunc (dm *DMap) getPut(e *env) (storage.Entry, error) {\n\tatomicKey := e.dmap + e.key\n\tdm.s.locker.Lock(atomicKey)\n\tdefer func() {\n\t\terr := dm.s.locker.Unlock(atomicKey)\n\t\tif err != nil {\n\t\t\tdm.s.log.V(3).Printf(\"[ERROR] Failed to release the lock for key: %s on DMap: %s: %v\", e.key, e.dmap, err)\n\t\t}\n\t}()\n\n\tentry, err := dm.Get(e.ctx, e.key)\n\tif errors.Is(err, ErrKeyNotFound) {\n\t\terr = nil\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = dm.put(e)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif entry == nil {\n\t\t// The value is nil.\n\t\treturn nil, nil\n\t}\n\treturn entry, nil\n}\n\n// GetPut atomically sets key to value and returns the old value stored at key.\nfunc (dm *DMap) GetPut(ctx context.Context, key string, value interface{}) (storage.Entry, error) {\n\tif value == nil {\n\t\tvalue = struct{}{}\n\t}\n\n\tvalueBuf := pool.Get()\n\tdefer 
pool.Put(valueBuf)\n\n\tenc := resp.New(valueBuf)\n\terr := enc.Encode(value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\te := newEnv(ctx)\n\te.dmap = dm.name\n\te.key = key\n\te.value = make([]byte, valueBuf.Len())\n\tcopy(e.value, valueBuf.Bytes())\n\n\traw, err := dm.getPut(e)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif raw == nil {\n\t\treturn nil, nil\n\t}\n\treturn raw, nil\n}\n\nfunc (dm *DMap) atomicIncrByFloat(e *env, delta float64) (float64, error) {\n\tatomicKey := e.dmap + e.key\n\tdm.s.locker.Lock(atomicKey)\n\tdefer func() {\n\t\terr := dm.s.locker.Unlock(atomicKey)\n\t\tif err != nil {\n\t\t\tdm.s.log.V(3).Printf(\"[ERROR] Failed to release the fine grained lock for key: %s on DMap: %s: %v\", e.key, e.dmap, err)\n\t\t}\n\t}()\n\n\tvar current float64\n\tentry, err := dm.Get(e.ctx, e.key)\n\tif errors.Is(err, ErrKeyNotFound) {\n\t\terr = nil\n\t}\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tif entry != nil {\n\t\tcurrent, err = util.ParseFloat(entry.Value(), 64)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tlatest := current + delta\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tvalueBuf := pool.Get()\n\tdefer pool.Put(valueBuf)\n\n\tenc := resp.New(valueBuf)\n\terr = enc.Encode(latest)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\te.value = valueBuf.Bytes()\n\te.value = make([]byte, valueBuf.Len())\n\tcopy(e.value, valueBuf.Bytes())\n\n\terr = dm.put(e)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn latest, nil\n}\n\n// IncrByFloat atomically increments key by delta. The return value is the new value after being incremented or an error.\nfunc (dm *DMap) IncrByFloat(ctx context.Context, key string, delta float64) (float64, error) {\n\te := newEnv(ctx)\n\te.dmap = dm.name\n\te.key = key\n\treturn dm.atomicIncrByFloat(e, delta)\n}\n"
  },
  {
    "path": "internal/dmap/atomic_handlers.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"strconv\"\n\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/tidwall/redcon\"\n)\n\nfunc (s *Service) incrDecrCommon(cmd, dmap, key string, delta int) (int, error) {\n\tdm, err := s.getOrCreateDMap(dmap)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\te := newEnv(s.ctx)\n\te.dmap = dm.name\n\te.key = key\n\treturn dm.atomicIncrDecr(cmd, e, delta)\n}\n\nfunc (s *Service) incrCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tincrCmd, err := protocol.ParseIncrCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\tlatest, err := s.incrDecrCommon(protocol.DMap.Incr, incrCmd.DMap, incrCmd.Key, incrCmd.Delta)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\tconn.WriteInt(latest)\n}\n\nfunc (s *Service) decrCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tdecrCmd, err := protocol.ParseDecrCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\tlatest, err := s.incrDecrCommon(protocol.DMap.Decr, decrCmd.DMap, decrCmd.Key, decrCmd.Delta)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\tconn.WriteInt(latest)\n}\n\nfunc (s *Service) getPutCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tgetPutCmd, err := protocol.ParseGetPutCommand(cmd)\n\tif err != nil 
{\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\tdm, err := s.getOrCreateDMap(getPutCmd.DMap)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\te := newEnv(s.ctx)\n\te.dmap = getPutCmd.DMap\n\te.key = getPutCmd.Key\n\te.value = getPutCmd.Value\n\told, err := dm.getPut(e)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tif old == nil {\n\t\tconn.WriteNull()\n\t\treturn\n\t}\n\n\tif getPutCmd.Raw {\n\t\tconn.WriteBulk(old.Encode())\n\t\treturn\n\t}\n\n\tconn.WriteBulk(old.Value())\n}\n\nfunc (s *Service) incrByFloatCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tincrCmd, err := protocol.ParseIncrByFloatCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tdm, err := s.getOrCreateDMap(incrCmd.DMap)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\te := newEnv(s.ctx)\n\te.dmap = dm.name\n\te.key = incrCmd.Key\n\tlatest, err := dm.atomicIncrByFloat(e, incrCmd.Delta)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tconn.WriteBulkString(strconv.FormatFloat(latest, 'f', -1, 64))\n}\n"
  },
  {
    "path": "internal/dmap/atomic_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/olric-data/olric/internal/resp\"\n\t\"github.com/olric-data/olric/internal/testcluster\"\n\t\"github.com/redis/go-redis/v9\"\n\t\"github.com/stretchr/testify/require\"\n\t\"golang.org/x/sync/errgroup\"\n)\n\nfunc TestDMap_loadCurrentAtomicInt(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\tkey := \"incr\"\n\n\tttlDuration := time.Second * 5\n\ts.config.DMaps.TTLDuration = time.Second * 5\n\n\tdm, err := s.NewDMap(\"atomic_test\")\n\trequire.NoError(t, err)\n\n\t_, err = dm.Incr(ctx, key, 1)\n\tif err != nil {\n\t\ts.log.V(2).Printf(\"[ERROR] Failed to call Incr: %v\", err)\n\t\treturn\n\t}\n\n\te := newEnv(ctx)\n\te.dmap = dm.name\n\te.key = key\n\t_, ttl, err := dm.loadCurrentAtomicInt(e)\n\trequire.NoError(t, err)\n\n\t<-time.After(time.Millisecond * 500)\n\trequire.WithinDuration(t, time.UnixMilli(ttl), time.Now(), ttlDuration)\n}\n\nfunc TestDMap_Atomic_Incr(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tvar wg sync.WaitGroup\n\tvar start chan struct{}\n\tkey := 
\"incr\"\n\n\tctx := context.Background()\n\tincr := func(dm *DMap) {\n\t\t<-start\n\t\tdefer wg.Done()\n\n\t\t_, err := dm.Incr(ctx, key, 1)\n\t\tif err != nil {\n\t\t\ts.log.V(2).Printf(\"[ERROR] Failed to call Incr: %v\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tdm, err := s.NewDMap(\"atomic_test\")\n\trequire.NoError(t, err)\n\n\tstart = make(chan struct{})\n\tfor i := 0; i < 100; i++ {\n\t\twg.Add(1)\n\t\tgo incr(dm)\n\t}\n\tclose(start)\n\twg.Wait()\n\n\tgr, err := dm.Get(ctx, key)\n\trequire.NoError(t, err)\n\n\tvar res int\n\terr = resp.Scan(gr.Value(), &res)\n\trequire.NoError(t, err)\n\trequire.Equal(t, 100, res)\n}\n\nfunc TestDMap_Atomic_Decr(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tvar wg sync.WaitGroup\n\tvar start chan struct{}\n\tkey := \"decr\"\n\n\tctx := context.Background()\n\n\tdecr := func(dm *DMap) {\n\t\t<-start\n\t\tdefer wg.Done()\n\n\t\t_, err := dm.Decr(ctx, key, 1)\n\t\tif err != nil {\n\t\t\ts.log.V(2).Printf(\"[ERROR] Failed to call Decr: %v\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tdm, err := s.NewDMap(\"atomic_test\")\n\trequire.NoError(t, err)\n\n\tstart = make(chan struct{})\n\tfor i := 0; i < 100; i++ {\n\t\twg.Add(1)\n\t\tgo decr(dm)\n\t}\n\tclose(start)\n\twg.Wait()\n\n\tres, err := dm.Get(context.Background(), key)\n\trequire.NoError(t, err)\n\n\tvar value int\n\terr = resp.Scan(res.Value(), &value)\n\trequire.NoError(t, err)\n\trequire.Equal(t, -100, value)\n}\n\nfunc TestDMap_Atomic_GetPut(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tvar total int64\n\tvar wg sync.WaitGroup\n\tvar start chan struct{}\n\tkey := \"getput\"\n\tgetput := func(dm *DMap, i int) {\n\t\t<-start\n\t\tdefer wg.Done()\n\n\t\tgr, err := dm.GetPut(context.Background(), key, i)\n\t\tif err != nil {\n\t\t\ts.log.V(2).Printf(\"[ERROR] Failed to call Decr: %v\", 
err)\n\t\t\treturn\n\t\t}\n\t\tif gr != nil {\n\t\t\tvar oldval int\n\t\t\terr = resp.Scan(gr.Value(), &oldval)\n\t\t\trequire.NoError(t, err)\n\t\t\tatomic.AddInt64(&total, int64(oldval))\n\t\t}\n\t}\n\n\tdm, err := s.NewDMap(\"atomic_test\")\n\trequire.NoError(t, err)\n\n\tstart = make(chan struct{})\n\tvar final int64\n\tfor i := 0; i < 100; i++ {\n\t\twg.Add(1)\n\t\tgo getput(dm, i)\n\t\tfinal += int64(i)\n\t}\n\tclose(start)\n\twg.Wait()\n\n\tgr, err := dm.Get(context.Background(), key)\n\trequire.NoError(t, err)\n\n\tvar last int\n\terr = resp.Scan(gr.Value(), &last)\n\trequire.NoError(t, err)\n\n\tatomic.AddInt64(&total, int64(last))\n\trequire.Equal(t, final, atomic.LoadInt64(&total))\n}\n\nfunc TestDMap_Atomic_IncrByFloat(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tvar wg sync.WaitGroup\n\tvar start chan struct{}\n\tkey := \"incrbyfloat\"\n\n\tctx := context.Background()\n\tincrByFloat := func(dm *DMap) {\n\t\t<-start\n\t\tdefer wg.Done()\n\n\t\t_, err := dm.IncrByFloat(ctx, key, 1.2)\n\t\tif err != nil {\n\t\t\ts.log.V(2).Printf(\"[ERROR] Failed to call IncrByFloat: %v\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tdm, err := s.NewDMap(\"atomic_test\")\n\trequire.NoError(t, err)\n\n\tstart = make(chan struct{})\n\tfor i := 0; i < 100; i++ {\n\t\twg.Add(1)\n\t\tgo incrByFloat(dm)\n\t}\n\tclose(start)\n\twg.Wait()\n\n\tgr, err := dm.Get(ctx, key)\n\trequire.NoError(t, err)\n\n\tvar res float64\n\terr = resp.Scan(gr.Value(), &res)\n\trequire.NoError(t, err)\n\trequire.Equal(t, 120.0000000000002, res)\n}\n\nfunc TestDMap_incrCommandHandler(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tvar errGr errgroup.Group\n\tfor i := 0; i < 100; i++ {\n\t\terrGr.Go(func() error {\n\t\t\tcmd := protocol.NewIncr(\"mydmap\", \"mykey\", 1).Command(context.Background())\n\t\t\trc := 
s.client.Get(s.rt.This().String())\n\t\t\terr := rc.Process(context.Background(), cmd)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t_, err = cmd.Result()\n\t\t\treturn err\n\t\t})\n\t}\n\trequire.NoError(t, errGr.Wait())\n\n\tcmd := protocol.NewGet(\"mydmap\", \"mykey\").Command(context.Background())\n\trc := s.client.Get(s.rt.This().String())\n\terr := rc.Process(context.Background(), cmd)\n\trequire.NoError(t, err)\n\n\tvalue, err := cmd.Bytes()\n\trequire.NoError(t, err)\n\tv := new(int)\n\terr = resp.Scan(value, v)\n\trequire.NoError(t, err)\n\trequire.Equal(t, 100, *v)\n}\n\nfunc TestDMap_incrCommandHandler_Single_Request(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tcmd := protocol.NewIncr(\"mydmap\", \"mykey\", 100).Command(context.Background())\n\trc := s.client.Get(s.rt.This().String())\n\terr := rc.Process(context.Background(), cmd)\n\trequire.NoError(t, err)\n\tvalue, err := cmd.Result()\n\n\trequire.NoError(t, err)\n\trequire.Equal(t, 100, int(value))\n}\n\nfunc TestDMap_decrCommandHandler(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tvar errGr errgroup.Group\n\tfor i := 0; i < 100; i++ {\n\t\terrGr.Go(func() error {\n\t\t\tcmd := protocol.NewDecr(\"mydmap\", \"mykey\", 1).Command(context.Background())\n\t\t\trc := s.client.Get(s.rt.This().String())\n\t\t\terr := rc.Process(context.Background(), cmd)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t_, err = cmd.Result()\n\t\t\treturn err\n\t\t})\n\t}\n\trequire.NoError(t, errGr.Wait())\n\n\tcmd := protocol.NewGet(\"mydmap\", \"mykey\").Command(context.Background())\n\trc := s.client.Get(s.rt.This().String())\n\terr := rc.Process(context.Background(), cmd)\n\trequire.NoError(t, err)\n\n\tvalue, err := cmd.Bytes()\n\trequire.NoError(t, err)\n\tv := new(int)\n\terr = resp.Scan(value, v)\n\trequire.NoError(t, 
err)\n\trequire.Equal(t, -100, *v)\n}\n\nfunc TestDMap_decrCommandHandler_Single_Request(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tcmd := protocol.NewDecr(\"mydmap\", \"mykey\", 100).Command(context.Background())\n\trc := s.client.Get(s.rt.This().String())\n\terr := rc.Process(context.Background(), cmd)\n\trequire.NoError(t, err)\n\tvalue, err := cmd.Result()\n\n\trequire.NoError(t, err)\n\trequire.Equal(t, -100, int(value))\n}\n\nfunc TestDMap_exGetPutOperation(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tvar total int64\n\tvar final int64\n\tstart := make(chan struct{})\n\n\tgetPut := func(i int) error {\n\t\t<-start\n\n\t\tbuf := bytes.NewBuffer(nil)\n\t\tenc := resp.New(buf)\n\t\terr := enc.Encode(i)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcmd := protocol.NewGetPut(\"mydmap\", \"mykey\", buf.Bytes()).Command(context.Background())\n\t\trc := s.client.Get(s.rt.This().String())\n\t\terr = rc.Process(context.Background(), cmd)\n\t\tif err == redis.Nil {\n\t\t\treturn nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tval, err := cmd.Bytes()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(val) != 0 {\n\t\t\toldval := new(int)\n\t\t\terr = resp.Scan(val, oldval)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tatomic.AddInt64(&total, int64(*oldval))\n\t\t}\n\t\treturn nil\n\t}\n\n\tvar errGr errgroup.Group\n\tfor i := 0; i < 100; i++ {\n\t\tnum := i\n\t\terrGr.Go(func() error {\n\t\t\treturn getPut(num)\n\t\t})\n\t\tfinal += int64(i)\n\t}\n\n\tclose(start)\n\trequire.NoError(t, errGr.Wait())\n\n\tdm, err := s.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tgr, err := dm.Get(context.Background(), \"mykey\")\n\trequire.NoError(t, err)\n\n\tvar last int\n\terr = resp.Scan(gr.Value(), &last)\n\trequire.NoError(t, err)\n\n\tatomic.AddInt64(&total, 
int64(last))\n\trequire.Equal(t, final, atomic.LoadInt64(&total))\n}\n\nfunc TestDMap_incrByFloatCommandHandler(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tvar errGr errgroup.Group\n\tfor i := 0; i < 100; i++ {\n\t\terrGr.Go(func() error {\n\t\t\tcmd := protocol.NewIncrByFloat(\"mydmap\", \"mykey\", 1.2).Command(context.Background())\n\t\t\trc := s.client.Get(s.rt.This().String())\n\t\t\terr := rc.Process(context.Background(), cmd)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t_, err = cmd.Result()\n\t\t\treturn err\n\t\t})\n\t}\n\trequire.NoError(t, errGr.Wait())\n\n\tcmd := protocol.NewGet(\"mydmap\", \"mykey\").Command(context.Background())\n\trc := s.client.Get(s.rt.This().String())\n\terr := rc.Process(context.Background(), cmd)\n\trequire.NoError(t, err)\n\n\tvalue, err := cmd.Bytes()\n\trequire.NoError(t, err)\n\tv := new(float64)\n\terr = resp.Scan(value, v)\n\trequire.NoError(t, err)\n\trequire.Equal(t, 120.0000000000002, *v)\n}\n"
  },
  {
    "path": "internal/dmap/balance.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/events\"\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/olric-data/olric/pkg/neterrors\"\n\t\"github.com/olric-data/olric/pkg/storage\"\n\t\"github.com/tidwall/redcon\"\n\t\"github.com/vmihailenco/msgpack/v5\"\n)\n\ntype fragmentPack struct {\n\tPartID  uint64\n\tKind    partitions.Kind\n\tName    string\n\tPayload []byte\n}\n\nfunc (dm *DMap) fragmentMergeFunction(f *fragment, hkey uint64, entry storage.Entry) error {\n\tcurrent, err := f.storage.Get(hkey)\n\tif errors.Is(err, storage.ErrKeyNotFound) {\n\t\treturn f.storage.Put(hkey, entry)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tversions := []*version{{entry: current}, {entry: entry}}\n\tversions = dm.sortVersions(versions)\n\twinner := versions[0].entry\n\tif winner == current {\n\t\t// No need to insert the winner\n\t\treturn nil\n\t}\n\treturn f.storage.Put(hkey, winner)\n}\n\nfunc (dm *DMap) mergeFragments(part *partitions.Partition, fp *fragmentPack) error {\n\tf, err := dm.loadOrCreateFragment(part)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Acquire fragment's lock. 
No one should work on it.\n\tf.Lock()\n\tdefer f.Unlock()\n\n\treturn f.storage.Import(fp.Payload, func(hkey uint64, entry storage.Entry) error {\n\t\treturn dm.fragmentMergeFunction(f, hkey, entry)\n\t})\n}\n\nfunc (s *Service) checkOwnership(part *partitions.Partition) bool {\n\towners := part.Owners()\n\tfor _, owner := range owners {\n\t\tif owner.CompareByID(s.rt.This()) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (s *Service) validateFragmentPack(fp *fragmentPack) error {\n\tif fp.PartID >= s.config.PartitionCount {\n\t\treturn fmt.Errorf(\"invalid partition id: %d\", fp.PartID)\n\t}\n\n\tvar part *partitions.Partition\n\tif fp.Kind == partitions.PRIMARY {\n\t\tpart = s.primary.PartitionByID(fp.PartID)\n\t} else {\n\t\tpart = s.backup.PartitionByID(fp.PartID)\n\t}\n\n\t// Check ownership before merging. This is useful to prevent data corruption in network partitioning case.\n\tif !s.checkOwnership(part) {\n\t\treturn fmt.Errorf(\"%w: %s\",\n\t\t\tneterrors.ErrInvalidArgument, fmt.Sprintf(\"partID: %d (kind: %s) doesn't belong to %s\",\n\t\t\t\tfp.PartID, fp.Kind, s.rt.This()))\n\t}\n\treturn nil\n}\n\nfunc (s *Service) moveFragmentCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tmoveFragmentCmd, err := protocol.ParseMoveFragmentCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\tfp := &fragmentPack{}\n\terr = msgpack.Unmarshal(moveFragmentCmd.Payload, fp)\n\tif err != nil {\n\t\ts.log.V(2).Printf(\"[ERROR] Failed to unmarshal DMap: %v\", err)\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tif err = s.validateFragmentPack(fp); err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tvar part *partitions.Partition\n\tif fp.Kind == partitions.PRIMARY {\n\t\tpart = s.primary.PartitionByID(fp.PartID)\n\t} else {\n\t\tpart = s.backup.PartitionByID(fp.PartID)\n\t}\n\ts.log.V(2).Printf(\"[INFO] Received DMap (kind: %s): %s on PartID: %d\", fp.Kind, fp.Name, fp.PartID)\n\n\tdm, err := 
s.NewDMap(fp.Name)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\terr = dm.mergeFragments(part, fp)\n\tif err != nil {\n\t\ts.log.V(2).Printf(\"[ERROR] Failed to merge Received DMap (kind: %s): %s on PartID: %d: %v\",\n\t\t\tfp.Kind, fp.Name, fp.PartID, err)\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tif s.config.EnableClusterEventsChannel {\n\t\te := &events.FragmentReceivedEvent{\n\t\t\tKind:          events.KindFragmentReceivedEvent,\n\t\t\tSource:        s.rt.This().String(),\n\t\t\tDataStructure: \"dmap\",\n\t\t\tPartitionID:   part.ID(),\n\t\t\tIdentifier:    fp.Name,\n\t\t\tLength:        len(moveFragmentCmd.Payload),\n\t\t\tIsBackup:      part.Kind() == partitions.BACKUP,\n\t\t\tTimestamp:     time.Now().UnixNano(),\n\t\t}\n\t\ts.wg.Add(1)\n\t\tgo s.publishEvent(e)\n\t}\n\n\tconn.WriteString(protocol.StatusOK)\n}\n"
  },
  {
    "path": "internal/dmap/balance_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/events\"\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/olric-data/olric/internal/testcluster\"\n\t\"github.com/olric-data/olric/internal/testutil\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/tidwall/redcon\"\n)\n\nfunc TestDMap_Balance_Invalid_PartID(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tfp := &fragmentPack{\n\t\tPartID:  12312,\n\t\tKind:    partitions.PRIMARY,\n\t\tName:    \"foobar\",\n\t\tPayload: nil,\n\t}\n\terr := s.validateFragmentPack(fp)\n\trequire.Error(t, err)\n}\n\nfunc TestDMap_Balance_FragmentMergeFunction(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tdm, err := s.NewDMap(\"mymap\")\n\trequire.NoError(t, err)\n\n\terr = dm.Put(context.Background(), \"mykey\", \"myval\", nil)\n\trequire.NoError(t, err)\n\n\thkey := partitions.HKey(\"mymap\", \"mykey\")\n\tpart := dm.getPartitionByHKey(hkey, partitions.PRIMARY)\n\tf, err := dm.loadFragment(part)\n\trequire.NoError(t, err)\n\n\tcurrentValue := 
[]byte(\"current-value\")\n\te := dm.engine.NewEntry()\n\te.SetKey(\"mykey\")\n\te.SetTimestamp(time.Now().UnixNano())\n\te.SetValue(currentValue)\n\n\terr = dm.fragmentMergeFunction(f, hkey, e)\n\trequire.NoError(t, err)\n\n\twinner, err := f.storage.Get(hkey)\n\trequire.NoError(t, err)\n\trequire.Equal(t, currentValue, winner.Value())\n}\n\nfunc TestDMap_Balancer_JoinNewNode(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\tdb1 := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tdm, err := db1.NewDMap(\"mymap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tvar totalKeys = 1000\n\tfor i := 0; i < totalKeys; i++ {\n\t\tkey := \"balancer-test.\" + strconv.Itoa(i)\n\t\terr = dm.Put(ctx, key, testutil.ToVal(i), nil)\n\t\trequire.NoError(t, err)\n\t}\n\n\t// This is an integration test. Here we try to observe the behavior of\n\t// balancer with the DMap service.\n\n\tdb2 := cluster.AddMember(nil).(*Service) // This automatically syncs the cluster.\n\n\tvar db1TotalKeys int\n\tfor partID := uint64(0); partID < db1.config.PartitionCount; partID++ {\n\t\tpart := db1.primary.PartitionByID(partID)\n\t\tdb1TotalKeys += part.Length()\n\t}\n\trequire.Less(t, db1TotalKeys, totalKeys)\n\n\tvar db2TotalKeys int\n\tfor partID := uint64(0); partID < db2.config.PartitionCount; partID++ {\n\t\tpart := db2.primary.PartitionByID(partID)\n\t\tdb2TotalKeys += part.Length()\n\t}\n\trequire.Less(t, db2TotalKeys, totalKeys)\n\n\trequire.Equal(t, totalKeys, db1TotalKeys+db2TotalKeys)\n}\n\nfunc TestDMap_Balancer_WrongOwnership(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\tdb1 := cluster.AddMember(nil).(*Service)\n\tdb2 := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tvar id uint64\n\tfor partID := uint64(0); partID < db2.config.PartitionCount; partID++ {\n\t\tpart := db2.primary.PartitionByID(partID)\n\t\tif part.Owner().CompareByID(db2.rt.This()) {\n\t\t\tid = part.ID()\n\t\t\tbreak\n\t\t}\n\t}\n\tfp := 
&fragmentPack{\n\t\tPartID: id,\n\t\tKind:   partitions.PRIMARY,\n\t}\n\t// invalid argument: partID: 1 (kind: Primary) doesn't belong to 127.0.0.1:62096\n\trequire.Error(t, db1.validateFragmentPack(fp))\n}\n\nfunc TestDMap_Balancer_ClusterEvents(t *testing.T) {\n\tc1 := testutil.NewConfig()\n\tc1.TriggerBalancerInterval = time.Millisecond\n\tc1.EnableClusterEventsChannel = true\n\te1 := testcluster.NewEnvironment(c1)\n\n\tcluster := testcluster.New(NewService)\n\tdb1 := cluster.AddMember(e1).(*Service)\n\tdefer cluster.Shutdown()\n\n\tresult := make(chan string, 1)\n\tdb1.server.ServeMux().HandleFunc(protocol.PubSub.Publish, func(conn redcon.Conn, cmd redcon.Command) {\n\t\tpublishCmd, err := protocol.ParsePublishCommand(cmd)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, events.ClusterEventsChannel, publishCmd.Channel)\n\n\t\tresult <- publishCmd.Message\n\n\t\tconn.WriteInt(1)\n\t})\n\n\tdm, err := db1.NewDMap(\"mymap\")\n\trequire.NoError(t, err)\n\n\tvar totalKeys = 1000\n\tfor i := 0; i < totalKeys; i++ {\n\t\tkey := \"balancer-test.\" + strconv.Itoa(i)\n\t\terr = dm.Put(context.Background(), key, testutil.ToVal(i), nil)\n\t\trequire.NoError(t, err)\n\t}\n\n\tgo func() {\n\t\tc2 := testutil.NewConfig()\n\t\tc1.TriggerBalancerInterval = time.Millisecond\n\t\tc2.EnableClusterEventsChannel = true\n\t\te2 := testcluster.NewEnvironment(c2)\n\t\ts2 := testutil.NewServer(c2)\n\t\ts2.ServeMux().HandleFunc(protocol.PubSub.Publish, func(conn redcon.Conn, cmd redcon.Command) {\n\t\t\tpublishCmd, err := protocol.ParsePublishCommand(cmd)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, events.ClusterEventsChannel, publishCmd.Channel)\n\n\t\t\tresult <- publishCmd.Message\n\n\t\t\tconn.WriteInt(1)\n\t\t})\n\t\te2.Set(\"server\", s2)\n\t\tcluster.AddMember(e2)\n\t}()\n\n\tfragmentEvents := make(map[uint64]map[string]struct{})\nL:\n\tfor {\n\t\tselect {\n\t\tcase msg := <-result:\n\t\t\tvalue := make(map[string]interface{})\n\t\t\terr = json.Unmarshal([]byte(msg), 
&value)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tkind := value[\"kind\"].(string)\n\t\t\tif kind == events.KindFragmentMigrationEvent || kind == events.KindFragmentReceivedEvent {\n\t\t\t\tpartID := uint64(value[\"partition_id\"].(float64))\n\t\t\t\tev, ok := fragmentEvents[partID]\n\t\t\t\tif ok {\n\t\t\t\t\tev[kind] = struct{}{}\n\t\t\t\t} else {\n\t\t\t\t\tfragmentEvents[partID] = map[string]struct{}{kind: {}}\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-time.After(time.Second):\n\t\t\tbreak L\n\t\t}\n\t}\n\n\tfor partID, data := range fragmentEvents {\n\t\trequire.Len(t, data, 2)\n\t\tpart := db1.primary.PartitionByID(partID)\n\t\t// Transferred to db2\n\t\trequire.NotEqual(t, part.Owner().ID, db1.rt.This().ID)\n\t}\n}\n"
  },
  {
    "path": "internal/dmap/compaction.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"context\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"golang.org/x/sync/semaphore\"\n)\n\nfunc (s *Service) callCompactionOnFragment(f *fragment) bool {\n\tfor {\n\t\tf.Lock()\n\t\tdone, err := f.Compaction()\n\t\tif err != nil {\n\t\t\tf.Unlock()\n\t\t\t// Continue\n\t\t\treturn true\n\t\t}\n\t\tf.Unlock()\n\n\t\tif done {\n\t\t\treturn true\n\t\t}\n\n\t\tselect {\n\t\tcase <-s.ctx.Done():\n\t\t\t// Break\n\t\t\treturn false\n\t\tcase <-time.After(time.Millisecond):\n\t\t}\n\t}\n}\n\nfunc (s *Service) doCompaction(partID uint64) {\n\tcompaction := func(part *partitions.Partition) {\n\t\tpart.Map().Range(func(name, tmp interface{}) bool {\n\t\t\tif !strings.HasPrefix(name.(string), \"dmap.\") {\n\t\t\t\t// Continue. 
This fragment belongs to a different data structure.\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tf := tmp.(*fragment)\n\t\t\treturn s.callCompactionOnFragment(f)\n\t\t})\n\t}\n\n\tpart := s.primary.PartitionByID(partID)\n\tcompaction(part)\n\n\tbackup := s.backup.PartitionByID(partID)\n\tcompaction(backup)\n}\n\nfunc (s *Service) triggerCompaction() {\n\tvar wg sync.WaitGroup\n\n\t// NumCPU returns the number of logical CPUs usable by the current process.\n\t//\n\t// The set of available CPUs is checked by querying the operating system\n\t// at process startup. Changes to operating system CPU allocation after\n\t// process startup are not reflected.\n\tnumWorkers := runtime.NumCPU()\n\tsem := semaphore.NewWeighted(int64(numWorkers))\n\tfor partID := uint64(0); partID < s.config.PartitionCount; partID++ {\n\t\tselect {\n\t\tcase <-s.ctx.Done():\n\t\t\tbreak\n\t\tdefault:\n\t\t}\n\n\t\tif err := sem.Acquire(s.ctx, 1); err != nil {\n\t\t\tif err != context.Canceled {\n\t\t\t\ts.log.V(3).Printf(\"[ERROR] Failed to acquire semaphore for DMap compaction: %v\", err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\twg.Add(1)\n\t\tgo func(id uint64) {\n\t\t\tdefer wg.Done()\n\t\t\tdefer sem.Release(1)\n\t\t\ts.doCompaction(id)\n\t\t}(partID)\n\t}\n\n\twg.Wait()\n}\n\nfunc (s *Service) compactionWorker() {\n\tdefer s.wg.Done()\n\n\ttimer := time.NewTimer(s.config.DMaps.TriggerCompactionInterval)\n\tdefer timer.Stop()\n\n\tfor {\n\t\ttimer.Reset(s.config.DMaps.TriggerCompactionInterval)\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\ts.triggerCompaction()\n\t\tcase <-s.ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "internal/dmap/compaction_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/ramblock\"\n\t\"github.com/olric-data/olric/pkg/storage\"\n\n\t\"github.com/olric-data/olric/config\"\n\t\"github.com/olric-data/olric/internal/testcluster\"\n\t\"github.com/olric-data/olric/internal/testutil\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestDMap_Compaction(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\tc := testutil.NewConfig()\n\tc.DMaps.TriggerCompactionInterval = time.Millisecond\n\tc.DMaps.Engine.Name = config.DefaultStorageEngine\n\n\tc.DMaps.Engine.Config = map[string]interface{}{\n\t\t\"tableSize\":           uint64(2048), // overwrite tableSize to trigger compaction.\n\t\t\"maxIdleTableTimeout\": time.Millisecond,\n\t}\n\tkv, err := ramblock.New(storage.NewConfig(c.DMaps.Engine.Config))\n\trequire.NoError(t, err)\n\tc.DMaps.Engine.Implementation = kv\n\n\te := testcluster.NewEnvironment(c)\n\ts := cluster.AddMember(e).(*Service)\n\tdefer cluster.Shutdown()\n\n\tcheckStorageStats := func() (allocated int) {\n\t\tfor partID := uint64(0); partID < s.config.PartitionCount; partID++ {\n\t\t\tpart := s.primary.PartitionByID(partID)\n\t\t\ttmp, ok := part.Map().Load(s.fragmentName(\"mymap\"))\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tf := 
tmp.(*fragment)\n\t\t\tf.RLock()\n\t\t\ts := f.storage.Stats()\n\t\t\tallocated += s.Allocated\n\t\t\tf.RUnlock()\n\t\t}\n\t\treturn\n\t}\n\n\tdm, err := s.NewDMap(\"mymap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tfor i := 0; i < 10000; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), nil)\n\t\trequire.NoError(t, err)\n\t}\n\n\tinitialAllocated := checkStorageStats()\n\n\tfor i := 0; i < 10000; i++ {\n\t\tif i%2 != 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t_, err = dm.Delete(ctx, testutil.ToKey(i))\n\t\trequire.NoError(t, err)\n\t}\n\n\terr = testutil.TryWithInterval(50, 100*time.Millisecond, func() error {\n\t\tallocated := checkStorageStats()\n\t\tif initialAllocated <= allocated {\n\t\t\treturn fmt.Errorf(\"initial allocation is still greater than or equal the current allocation\")\n\t\t}\n\t\treturn nil\n\t})\n\trequire.NoError(t, err)\n}\n"
  },
  {
    "path": "internal/dmap/config.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/config\"\n)\n\n// dmapConfig keeps DMap config control parameters and access-log for keys in a dmap.\ntype dmapConfig struct {\n\tengine          *config.Engine\n\tmaxIdleDuration time.Duration\n\tttlDuration     time.Duration\n\tmaxKeys         int\n\tmaxInuse        int\n\tlruSamples      int\n\tevictionPolicy  config.EvictionPolicy\n}\n\nfunc (c *dmapConfig) load(dc *config.DMaps, name string) error {\n\t// Try to set config configuration for this dmap.\n\tc.maxIdleDuration = dc.MaxIdleDuration\n\tc.ttlDuration = dc.TTLDuration\n\tc.maxKeys = dc.MaxKeys\n\tc.maxInuse = dc.MaxInuse\n\tc.lruSamples = dc.LRUSamples\n\tc.evictionPolicy = dc.EvictionPolicy\n\tc.engine = dc.Engine\n\n\tif dc.Custom != nil {\n\t\t// config.DMap struct can be used for fine-grained control.\n\t\tcs, ok := dc.Custom[name]\n\t\tif ok {\n\t\t\tif c.maxIdleDuration != cs.MaxIdleDuration {\n\t\t\t\tc.maxIdleDuration = cs.MaxIdleDuration\n\t\t\t}\n\t\t\tif c.ttlDuration != cs.TTLDuration {\n\t\t\t\tc.ttlDuration = cs.TTLDuration\n\t\t\t}\n\t\t\tif c.evictionPolicy != cs.EvictionPolicy {\n\t\t\t\tc.evictionPolicy = cs.EvictionPolicy\n\t\t\t}\n\t\t\tif c.maxKeys != cs.MaxKeys {\n\t\t\t\tc.maxKeys = cs.MaxKeys\n\t\t\t}\n\t\t\tif c.maxInuse != cs.MaxInuse {\n\t\t\t\tc.maxInuse = 
cs.MaxInuse\n\t\t\t}\n\t\t\tif c.lruSamples != cs.LRUSamples {\n\t\t\t\tc.lruSamples = cs.LRUSamples\n\t\t\t}\n\t\t\tif c.evictionPolicy != cs.EvictionPolicy {\n\t\t\t\tc.evictionPolicy = cs.EvictionPolicy\n\t\t\t}\n\t\t\tif c.engine == nil {\n\t\t\t\tc.engine = cs.Engine\n\t\t\t}\n\t\t}\n\t}\n\n\t//TODO: Create a new function to verify config.\n\tif c.evictionPolicy == config.LRUEviction {\n\t\tif c.maxInuse <= 0 && c.maxKeys <= 0 {\n\t\t\treturn fmt.Errorf(\"maxInuse or maxKeys have to be greater than zero\")\n\t\t}\n\t\t// set the default value.\n\t\tif c.lruSamples == 0 {\n\t\t\tc.lruSamples = config.DefaultLRUSamples\n\t\t}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "internal/dmap/config_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/config\"\n\t\"github.com/olric-data/olric/internal/testutil\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestDMap_Config(t *testing.T) {\n\tc := config.New(\"local\")\n\t// Config for all new DMaps\n\tc.DMaps.NumEvictionWorkers = 1\n\tc.DMaps.TTLDuration = 100 * time.Second\n\tc.DMaps.MaxKeys = 100000\n\tc.DMaps.MaxInuse = 1000000\n\tc.DMaps.LRUSamples = 10\n\tc.DMaps.EvictionPolicy = config.LRUEviction\n\tc.DMaps.Engine = testutil.NewEngineConfig(t)\n\n\t// Config for specified DMaps\n\tc.DMaps.Custom = map[string]config.DMap{\"foobar\": {\n\t\tMaxIdleDuration: 60 * time.Second,\n\t\tTTLDuration:     300 * time.Second,\n\t\tMaxKeys:         500000,\n\t\tLRUSamples:      20,\n\t\tEvictionPolicy:  \"NONE\",\n\t\tEngine: &config.Engine{\n\t\t\tName: \"ramblock\",\n\t\t\tConfig: map[string]interface{}{\n\t\t\t\t\"maxIdleTableTimeout\": 15 * time.Minute,\n\t\t\t\t\"tableSize\":           uint64(1048576),\n\t\t\t},\n\t\t},\n\t}}\n\n\tdc := dmapConfig{}\n\terr := dc.load(c.DMaps, \"mydmap\")\n\trequire.NoError(t, err)\n\trequire.Equal(t, c.DMaps.TTLDuration, dc.ttlDuration)\n\trequire.Equal(t, c.DMaps.MaxKeys, dc.maxKeys)\n\trequire.Equal(t, c.DMaps.MaxInuse, dc.maxInuse)\n\trequire.Equal(t, c.DMaps.LRUSamples, dc.lruSamples)\n\trequire.Equal(t, 
c.DMaps.EvictionPolicy, dc.evictionPolicy)\n\trequire.Equal(t, c.DMaps.Engine.Name, dc.engine.Name)\n\n\tt.Run(\"Custom config\", func(t *testing.T) {\n\t\tdcc := dmapConfig{}\n\t\terr := dcc.load(c.DMaps, \"foobar\")\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, c.DMaps.Custom[\"foobar\"].TTLDuration, dcc.ttlDuration)\n\t\trequire.Equal(t, c.DMaps.Custom[\"foobar\"].MaxKeys, dcc.maxKeys)\n\t\trequire.Equal(t, c.DMaps.Custom[\"foobar\"].MaxInuse, dcc.maxInuse)\n\t\trequire.Equal(t, c.DMaps.Custom[\"foobar\"].LRUSamples, dcc.lruSamples)\n\t\trequire.Equal(t, c.DMaps.Custom[\"foobar\"].EvictionPolicy, dcc.evictionPolicy)\n\n\t\tc.DMaps.Custom[\"foobar\"].Engine.Implementation = nil\n\t\tdcc.engine.Implementation = nil\n\n\t\trequire.Equal(t, c.DMaps.Custom[\"foobar\"].Engine, dcc.engine)\n\t})\n}\n"
  },
  {
    "path": "internal/dmap/delete.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"context\"\n\t\"errors\"\n\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/discovery\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/olric-data/olric/internal/stats\"\n\t\"golang.org/x/sync/errgroup\"\n)\n\nvar (\n\t// DeleteHits is the number of deletion requests resulting in an item being removed.\n\tDeleteHits = stats.NewInt64Counter()\n\n\t// DeleteMisses is the number of deletion requests for missing keys.\n\tDeleteMisses = stats.NewInt64Counter()\n)\n\nfunc (dm *DMap) deleteFromFragment(key string, kind partitions.Kind) error {\n\thkey := partitions.HKey(dm.name, key)\n\tpart := dm.getPartitionByHKey(hkey, kind)\n\tf, err := dm.loadFragment(part)\n\tif errors.Is(err, errFragmentNotFound) {\n\t\t// key doesn't exist\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf.Lock()\n\tdefer f.Unlock()\n\n\treturn f.storage.Delete(hkey)\n}\n\nfunc (dm *DMap) deleteFromPreviousOwners(key string, owners []discovery.Member) error {\n\t// Traverse in reverse order. 
Except from the latest host, this one.\n\tfor i := len(owners) - 2; i >= 0; i-- {\n\t\towner := owners[i]\n\t\tcmd := protocol.NewDelEntry(dm.name, key).Command(dm.s.ctx)\n\t\trc := dm.s.client.Get(owner.String())\n\t\terr := rc.Process(dm.s.ctx, cmd)\n\t\tif err != nil {\n\t\t\treturn protocol.ConvertError(err)\n\t\t}\n\t\terr = cmd.Err()\n\t\tif err != nil {\n\t\t\treturn protocol.ConvertError(err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (dm *DMap) deleteBackupOnCluster(hkey uint64, key string) error {\n\towners := dm.s.backup.PartitionOwnersByHKey(hkey)\n\tvar g errgroup.Group\n\tfor _, owner := range owners {\n\t\tmem := owner\n\t\tg.Go(func() error {\n\t\t\tcmd := protocol.NewDelEntry(dm.name, key).SetReplica().Command(dm.s.ctx)\n\t\t\trc := dm.s.client.Get(mem.String())\n\t\t\terr := rc.Process(dm.s.ctx, cmd)\n\t\t\tif err != nil {\n\t\t\t\tdm.s.log.V(3).Printf(\"[ERROR] Failed to delete replica key/value on %s: %s\", dm.name, err)\n\t\t\t\treturn protocol.ConvertError(err)\n\t\t\t}\n\t\t\treturn protocol.ConvertError(cmd.Err())\n\t\t})\n\t}\n\treturn g.Wait()\n}\n\n// deleteOnCluster is not a thread-safe function\nfunc (dm *DMap) deleteOnCluster(hkey uint64, key string, f *fragment) error {\n\towners := dm.s.primary.PartitionOwnersByHKey(hkey)\n\tif len(owners) == 0 {\n\t\tpanic(\"partition owners list cannot be empty\")\n\t}\n\n\terr := dm.deleteFromPreviousOwners(key, owners)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif dm.s.config.ReplicaCount != 0 {\n\t\terr := dm.deleteBackupOnCluster(hkey, key)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = f.storage.Delete(hkey)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// DeleteHits is the number of deletion reqs resulting in an item being removed.\n\tDeleteHits.Increase(1)\n\n\treturn nil\n}\n\nfunc (dm *DMap) deleteKey(key string) error {\n\thkey := partitions.HKey(dm.name, key)\n\tpart := dm.getPartitionByHKey(hkey, partitions.PRIMARY)\n\tf, err := dm.loadOrCreateFragment(part)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\n\tf.Lock()\n\tdefer f.Unlock()\n\n\t// Check the HKey before trying to delete it.\n\tif !f.storage.Check(hkey) {\n\t\t// DeleteMisses is the number of deletions reqs for missing keys\n\t\tDeleteMisses.Increase(1)\n\t\treturn nil\n\t}\n\n\treturn dm.deleteOnCluster(hkey, key, f)\n}\n\nfunc (dm *DMap) deleteKeys(ctx context.Context, keys ...string) (int, error) {\n\tmembers := make(map[discovery.Member][]string)\n\tfor _, key := range keys {\n\t\thkey := partitions.HKey(dm.name, key)\n\t\tmember := dm.s.primary.PartitionByHKey(hkey).Owner()\n\t\tmembers[member] = append(members[member], key)\n\t}\n\n\tfor member, distributedKeys := range members {\n\t\tif member.CompareByName(dm.s.rt.This()) {\n\t\t\tfor _, key := range distributedKeys {\n\t\t\t\tif err := dm.deleteKey(key); err != nil {\n\t\t\t\t\treturn 0, err\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tcmd := protocol.NewDel(dm.name, distributedKeys...).Command(dm.s.ctx)\n\t\t\trc := dm.s.client.Get(member.String())\n\t\t\terr := rc.Process(ctx, cmd)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, protocol.ConvertError(err)\n\t\t\t}\n\n\t\t\treturn 0, protocol.ConvertError(cmd.Err())\n\t\t}\n\t}\n\n\treturn len(keys), nil\n}\n\n// Delete deletes the value for the given key. Delete will not return error if key doesn't exist. It's thread-safe.\n// It is safe to modify the contents of the argument after Delete returns.\nfunc (dm *DMap) Delete(ctx context.Context, keys ...string) (int, error) {\n\treturn dm.deleteKeys(ctx, keys...)\n}\n"
  },
  {
    "path": "internal/dmap/delete_handlers.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/tidwall/redcon\"\n)\n\nfunc (s *Service) delCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tdelCmd, err := protocol.ParseDelCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\tdm, err := s.getOrCreateDMap(delCmd.DMap)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tcount, err := dm.deleteKeys(s.ctx, delCmd.Keys...)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tconn.WriteInt(count)\n}\n\nfunc (s *Service) delEntryCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tdelCmd, err := protocol.ParseDelEntryCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\tdm, err := s.getOrCreateDMap(delCmd.Del.DMap)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tvar kind = partitions.PRIMARY\n\tif delCmd.Replica {\n\t\tkind = partitions.BACKUP\n\t}\n\tfor _, key := range delCmd.Del.Keys {\n\t\terr = dm.deleteFromFragment(key, kind)\n\t\tif err != nil {\n\t\t\tprotocol.WriteError(conn, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tconn.WriteInt(len(delCmd.Del.Keys))\n}\n"
  },
  {
    "path": "internal/dmap/delete_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/config\"\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/discovery\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/olric-data/olric/internal/ramblock\"\n\t\"github.com/olric-data/olric/internal/testcluster\"\n\t\"github.com/olric-data/olric/internal/testutil\"\n\t\"github.com/olric-data/olric/pkg/storage\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc checkEmptyStorageEngine(t *testing.T, s *Service) {\n\tmaximum := 50\n\tcheck := func(current int) (bool, error) {\n\t\tfor partID := uint64(0); partID < s.config.PartitionCount; partID++ {\n\t\t\tpart := s.primary.PartitionByID(partID)\n\t\t\ttmp, ok := part.Map().Load(\"dmap.mymap\")\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tf := tmp.(*fragment)\n\t\t\tf.RLock()\n\t\t\tnumTables := f.storage.Stats().NumTables\n\t\t\tf.RUnlock()\n\n\t\t\tif numTables != 1 && current < maximum-1 {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\tif numTables != 1 && current >= maximum-1 {\n\t\t\t\treturn false, fmt.Errorf(\"numTables=%d PartID: %d\", numTables, partID)\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\t}\n\n\tfor i := 0; i < maximum; i++ {\n\t\tdone, err := check(i)\n\t\tif err != nil 
{\n\t\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t\t}\n\t\tif done {\n\t\t\treturn\n\t\t}\n\t\t<-time.After(100 * time.Millisecond)\n\t}\n\tt.Fatalf(\"Failed to control compaction status\")\n}\n\nfunc TestDMap_Delete_Cluster(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts1 := cluster.AddMember(nil).(*Service)\n\ts2 := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tdm1, err := s1.NewDMap(\"mymap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tfor i := 0; i < 10; i++ {\n\t\terr = dm1.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), nil)\n\t\trequire.NoError(t, err)\n\t}\n\n\tdm2, err := s2.NewDMap(\"mymap\")\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < 10; i++ {\n\t\t_, err = dm2.Delete(ctx, testutil.ToKey(i))\n\t\trequire.NoError(t, err)\n\n\t\t_, err = dm2.Get(ctx, testutil.ToKey(i))\n\t\trequire.ErrorIs(t, err, ErrKeyNotFound)\n\t}\n}\n\nfunc TestDMap_Delete_Lookup(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts1 := cluster.AddMember(nil).(*Service)\n\tcluster.AddMember(nil)\n\tdefer cluster.Shutdown()\n\n\tdm1, err := s1.NewDMap(\"mymap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tfor i := 0; i < 10; i++ {\n\t\terr = dm1.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), nil)\n\t\trequire.NoError(t, err)\n\t}\n\n\ts3 := cluster.AddMember(nil).(*Service)\n\n\tdm2, err := s3.NewDMap(\"mymap\")\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < 10; i++ {\n\t\t_, err = dm2.Delete(ctx, testutil.ToKey(i))\n\t\trequire.NoError(t, err)\n\n\t\t_, err = dm2.Get(ctx, testutil.ToKey(i))\n\t\trequire.ErrorIs(t, err, ErrKeyNotFound)\n\t}\n}\n\nfunc TestDMap_Delete_StaleFragments(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\tc1 := testutil.NewConfig()\n\tc1.DMaps.CheckEmptyFragmentsInterval = time.Millisecond\n\te1 := testcluster.NewEnvironment(c1)\n\ts1 := cluster.AddMember(e1).(*Service)\n\n\tc2 := testutil.NewConfig()\n\tc2.DMaps.CheckEmptyFragmentsInterval = 
time.Millisecond\n\te2 := testcluster.NewEnvironment(c2)\n\ts2 := cluster.AddMember(e2).(*Service)\n\n\tdefer cluster.Shutdown()\n\n\tdm1, err := s1.NewDMap(\"mymap\")\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\tctx := context.Background()\n\tfor i := 0; i < 100; i++ {\n\t\terr = dm1.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), nil)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t\t}\n\t}\n\n\tdm2, err := s2.NewDMap(\"mymap\")\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\tfor i := 0; i < 100; i++ {\n\t\t_, err = dm2.Delete(ctx, testutil.ToKey(i))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t\t}\n\n\t\t_, err = dm2.Get(ctx, testutil.ToKey(i))\n\t\tif !errors.Is(err, ErrKeyNotFound) {\n\t\t\tt.Fatalf(\"Expected ErrKeyNotFound. Got: %v\", err)\n\t\t}\n\t}\n\n\ts1.wg.Add(1)\n\tgo s1.janitorWorker()\n\ts2.wg.Add(1)\n\tgo s2.janitorWorker()\n\n\tvar dc int32\n\tfor i := 0; i < 1000; i++ {\n\t\tdc = 0\n\t\tfor partID := uint64(0); partID < s1.config.PartitionCount; partID++ {\n\t\t\tfor _, instance := range []*Service{s1, s2} {\n\t\t\t\tpart := instance.primary.PartitionByID(partID)\n\t\t\t\tpart.Map().Range(func(name, dm interface{}) bool { dc++; return true })\n\n\t\t\t\tbpart := instance.backup.PartitionByID(partID)\n\t\t\t\tbpart.Map().Range(func(name, dm interface{}) bool { dc++; return true })\n\t\t\t}\n\t\t}\n\t\tif dc == 0 {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n\tif dc != 0 {\n\t\tt.Fatalf(\"Expected dmap count is 0. Got: %d\", dc)\n\t}\n}\n\nfunc TestDMap_Delete_PreviousOwner(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tdm, err := s.NewDMap(\"mydmap\")\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\terr = dm.Put(context.Background(), \"mykey\", \"myvalue\", nil)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. 
Got: %v\", err)\n\t}\n\tcmd := protocol.NewDelEntry(\"mydmap\", \"mykey\").Command(context.Background())\n\trc := s.client.Get(s.rt.This().String())\n\terr = rc.Process(context.Background(), cmd)\n\trequire.NoError(t, err)\n\trequire.NoError(t, cmd.Err())\n\n\t_, err = dm.Get(context.Background(), \"mykey\")\n\trequire.ErrorIs(t, err, ErrKeyNotFound)\n}\n\nfunc TestDMap_Delete_DeleteKeyValFromPreviousOwners(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tcluster.AddMember(nil)\n\tdefer cluster.Shutdown()\n\n\tdm, err := s.NewDMap(\"mydmap\")\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\terr = dm.Put(context.Background(), \"mykey\", \"myvalue\", nil)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\t// Prepare fragmented partition owners list\n\thkey := partitions.HKey(\"mydmap\", \"mykey\")\n\towners := s.primary.PartitionOwnersByHKey(hkey)\n\towner := owners[len(owners)-1]\n\n\tvar data []discovery.Member\n\tfor _, member := range s.rt.Discovery().GetMembers() {\n\t\tif member.CompareByID(owner) {\n\t\t\tcontinue\n\t\t}\n\t\tdata = append(data, member)\n\t}\n\t// this has to be the last one\n\tdata = append(data, owner)\n\terr = dm.deleteFromPreviousOwners(\"mykey\", data)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n}\n\nfunc TestDMap_Delete_Backup(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\n\tc1 := testutil.NewConfig()\n\tc1.ReadRepair = true\n\tc1.ReplicaCount = 2\n\te1 := testcluster.NewEnvironment(c1)\n\ts1 := cluster.AddMember(e1).(*Service)\n\n\tc2 := testutil.NewConfig()\n\tc2.ReadRepair = true\n\tc2.ReplicaCount = 2\n\te2 := testcluster.NewEnvironment(c2)\n\ts2 := cluster.AddMember(e2).(*Service)\n\n\tdefer cluster.Shutdown()\n\n\tdm1, err := s1.NewDMap(\"mymap\")\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. 
Got: %v\", err)\n\t}\n\n\tctx := context.Background()\n\tfor i := 0; i < 10; i++ {\n\t\terr = dm1.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), nil)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t\t}\n\t}\n\n\tdm2, err := s2.NewDMap(\"mymap\")\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\tfor i := 0; i < 10; i++ {\n\t\t_, err = dm2.Delete(ctx, testutil.ToKey(i))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t\t}\n\n\t\t_, err = dm2.Get(ctx, testutil.ToKey(i))\n\t\tif err != ErrKeyNotFound {\n\t\t\tt.Fatalf(\"Expected ErrKeyNotFound. Got: %v\", err)\n\t\t}\n\t}\n}\n\nfunc TestDMap_Delete_Compaction(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\tc := testutil.NewConfig()\n\tc.ReadRepair = true\n\tc.ReplicaCount = 2\n\tc.DMaps.TriggerCompactionInterval = time.Millisecond\n\tc.DMaps.Engine.Name = config.DefaultStorageEngine\n\n\tc.DMaps.Engine.Config = map[string]interface{}{\n\t\t\"tableSize\":           uint64(100), // overwrite tableSize to trigger compaction.\n\t\t\"maxIdleTableTimeout\": time.Millisecond,\n\t}\n\n\tkv, err := ramblock.New(storage.NewConfig(c.DMaps.Engine.Config))\n\trequire.NoError(t, err)\n\tc.DMaps.Engine.Implementation = kv\n\n\te := testcluster.NewEnvironment(c)\n\n\ts := cluster.AddMember(e).(*Service)\n\tdefer cluster.Shutdown()\n\n\tdm, err := s.NewDMap(\"mymap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tfor i := 0; i < 100; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), nil)\n\t\trequire.NoError(t, err)\n\t}\n\n\tfor i := 0; i < 100; i++ {\n\t\t_, err = dm.Delete(ctx, testutil.ToKey(i))\n\t\trequire.NoError(t, err)\n\n\t\t_, err = dm.Get(ctx, testutil.ToKey(i))\n\t\trequire.ErrorIs(t, err, ErrKeyNotFound)\n\t}\n\tcheckEmptyStorageEngine(t, s)\n}\n"
  },
  {
    "path": "internal/dmap/destroy.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"context\"\n\t\"runtime\"\n\n\t\"github.com/olric-data/olric/internal/discovery\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"golang.org/x/sync/errgroup\"\n\t\"golang.org/x/sync/semaphore\"\n)\n\nfunc (dm *DMap) destroyOnCluster(ctx context.Context) error {\n\tnum := int64(runtime.NumCPU())\n\tsem := semaphore.NewWeighted(num)\n\n\tvar g errgroup.Group\n\n\t// Don't block routing table to destroy a DMap on the cluster.\n\t// Just get a copy of members and run Destroy.\n\tvar members []discovery.Member\n\tm := dm.s.rt.Members()\n\tm.RLock()\n\tm.Range(func(_ uint64, member discovery.Member) bool {\n\t\tmembers = append(members, member)\n\t\treturn true\n\t})\n\tm.RUnlock()\n\n\tfor _, item := range members {\n\t\taddr := item.String()\n\t\tg.Go(func() error {\n\t\t\tif err := sem.Acquire(dm.s.ctx, 1); err != nil {\n\t\t\t\tdm.s.log.V(3).\n\t\t\t\t\tPrintf(\"[ERROR] Failed to acquire semaphore to call Destroy command on %s for %s: %v\",\n\t\t\t\t\t\taddr, dm.name, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer sem.Release(1)\n\n\t\t\tdm.s.log.V(6).Printf(\"[DEBUG] Calling DM.DESTROY command on %s for %s\", addr, dm.name)\n\t\t\tcmd := protocol.NewDestroy(dm.name).SetLocal().Command(dm.s.ctx)\n\t\t\trc := dm.s.client.Get(addr)\n\t\t\terr := rc.Process(ctx, cmd)\n\t\t\tif err != nil 
{\n\t\t\t\tdm.s.log.V(3).Printf(\"[ERROR] DM.DESTROY returned an error: %v\", err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn cmd.Err()\n\t\t})\n\t}\n\treturn g.Wait()\n}\n\n// Destroy flushes the given DMap on the cluster. You should know that there\n// is no global lock on DMaps. So if you call Put, Put with EX and Destroy methods\n// concurrently on the cluster, Put and Put with EX calls may set new values to the DMap.\nfunc (dm *DMap) Destroy(ctx context.Context) error {\n\treturn dm.destroyOnCluster(ctx)\n}\n"
  },
  {
    "path": "internal/dmap/destroy_handlers.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"errors\"\n\n\t\"github.com/olric-data/olric/config\"\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/tidwall/redcon\"\n)\n\nfunc (dm *DMap) destroyFragmentOnPartition(part *partitions.Partition) error {\n\tf, err := dm.loadFragment(part)\n\tif errors.Is(err, errFragmentNotFound) {\n\t\t// not exists\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn wipeOutFragment(part, dm.fragmentName, f)\n}\n\nfunc (s *Service) destroyLocalDMap(name string) error {\n\t// This is very similar with rm -rf. 
Destroys given dmap on the cluster\n\tfor partID := uint64(0); partID < s.config.PartitionCount; partID++ {\n\t\tdm, err := s.getDMap(name)\n\t\tif errors.Is(err, ErrDMapNotFound) {\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tpart := dm.s.primary.PartitionByID(partID)\n\t\terr = dm.destroyFragmentOnPartition(part)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Destroy on replicas\n\t\tif s.config.ReplicaCount > config.MinimumReplicaCount {\n\t\t\tbackup := dm.s.backup.PartitionByID(partID)\n\t\t\terr = dm.destroyFragmentOnPartition(backup)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\ts.Lock()\n\tdelete(s.dmaps, name)\n\ts.Unlock()\n\n\treturn nil\n}\n\nfunc (s *Service) destroyCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tdestroyCmd, err := protocol.ParseDestroyCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tdm, err := s.getOrCreateDMap(destroyCmd.DMap)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tif destroyCmd.Local {\n\t\terr = s.destroyLocalDMap(destroyCmd.DMap)\n\t} else {\n\t\terr = dm.destroyOnCluster(s.ctx)\n\t}\n\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\tconn.WriteString(protocol.StatusOK)\n}\n"
  },
  {
    "path": "internal/dmap/destroy_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"context\"\n\t\"testing\"\n\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/olric-data/olric/internal/testcluster\"\n\t\"github.com/olric-data/olric/internal/testutil\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestDMap_Destroy_Standalone(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tcluster.AddMember(nil)\n\tdefer cluster.Shutdown()\n\n\tdm, err := s.NewDMap(\"mymap\")\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\tctx := context.Background()\n\tfor i := 0; i < 100; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), nil)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t\t}\n\t}\n\n\terr = dm.Destroy(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\tfor i := 0; i < 100; i++ {\n\t\t_, err = dm.Get(ctx, testutil.ToKey(i))\n\t\tif err != ErrKeyNotFound {\n\t\t\tt.Fatalf(\"Expected ErrKeyNotFound. 
Got: %v\", err)\n\t\t}\n\t}\n}\n\nfunc TestDMap_Destroy_Cluster(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\tc1 := testutil.NewConfig()\n\tc1.ReplicaCount = 2\n\te1 := testcluster.NewEnvironment(c1)\n\ts := cluster.AddMember(e1).(*Service)\n\n\tc2 := testutil.NewConfig()\n\tc2.ReplicaCount = 2\n\te2 := testcluster.NewEnvironment(c2)\n\tcluster.AddMember(e2)\n\n\tdefer cluster.Shutdown()\n\n\tdm, err := s.NewDMap(\"mymap\")\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\tctx := context.Background()\n\tfor i := 0; i < 100; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), nil)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t\t}\n\t}\n\n\terr = dm.Destroy(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\tfor i := 0; i < 100; i++ {\n\t\t_, err = dm.Get(ctx, testutil.ToKey(i))\n\t\tif err != ErrKeyNotFound {\n\t\t\tt.Fatalf(\"Expected ErrKeyNotFound. Got: %v\", err)\n\t\t}\n\t}\n}\n\nfunc TestDMap_Destroy_destroyOperation(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tcluster.AddMember(nil)\n\tdefer cluster.Shutdown()\n\n\tdm, err := s.NewDMap(\"mydmap\")\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\tctx := context.Background()\n\tfor i := 0; i < 100; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), nil)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t\t}\n\t}\n\tcmd := protocol.NewDestroy(\"mydmap\").Command(s.ctx)\n\trc := s.client.Get(s.rt.This().String())\n\terr = rc.Process(s.ctx, cmd)\n\trequire.NoError(t, err)\n\trequire.NoError(t, cmd.Err())\n\n\tfor i := 0; i < 100; i++ {\n\t\t_, err = dm.Get(ctx, testutil.ToKey(i))\n\t\trequire.ErrorIs(t, err, ErrKeyNotFound)\n\t}\n}\n"
  },
  {
    "path": "internal/dmap/dmap.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/pkg/storage\"\n)\n\nconst nilTimeout = 0 * time.Second\n\nvar (\n\t// ErrKeyNotFound is returned when a key could not be found.\n\tErrKeyNotFound  = errors.New(\"key not found\")\n\tErrDMapNotFound = errors.New(\"dmap not found\")\n\tErrServerGone   = errors.New(\"server is gone\")\n)\n\n// DMap implements a single-hop distributed hash table.\ntype DMap struct {\n\tname         string\n\tfragmentName string\n\ts            *Service\n\tengine       storage.Engine\n\tconfig       *dmapConfig\n}\n\n// Name exposes name of the DMap.\nfunc (dm *DMap) Name() string {\n\treturn dm.name\n}\n\n// getDMap returns an initialized DMap instance, otherwise it returns ErrDMapNotFound.\nfunc (s *Service) getDMap(name string) (*DMap, error) {\n\ts.RLock()\n\tdefer s.RUnlock()\n\n\tdm, ok := s.dmaps[name]\n\tif !ok {\n\t\treturn nil, ErrDMapNotFound\n\t}\n\treturn dm, nil\n}\n\nfunc (s *Service) fragmentName(name string) string {\n\treturn fmt.Sprintf(\"dmap.%s\", name)\n}\n\n// NewDMap creates and returns a new DMap instance. 
It checks member count quorum\n// and bootstrapping status before creating a new DMap.\nfunc (s *Service) NewDMap(name string) (*DMap, error) {\n\t// Check operation status first:\n\t//\n\t// * Checks member count in the cluster, returns ErrClusterQuorum if\n\t//   the quorum value cannot be satisfied,\n\t// * Checks bootstrapping status and awaits for a short period before\n\t//   returning ErrRequest timeout.\n\tif err := s.rt.CheckMemberCountQuorum(); err != nil {\n\t\treturn nil, err\n\t}\n\t// An Olric node has to be bootstrapped to function properly.\n\tif err := s.rt.CheckBootstrap(); err != nil {\n\t\treturn nil, err\n\t}\n\n\ts.Lock()\n\tdefer s.Unlock()\n\n\tdm, ok := s.dmaps[name]\n\tif ok {\n\t\treturn dm, nil\n\t}\n\n\tdm = &DMap{\n\t\tconfig:       &dmapConfig{},\n\t\tname:         name,\n\t\tfragmentName: s.fragmentName(name),\n\t\ts:            s,\n\t}\n\tif err := dm.config.load(s.config.DMaps, name); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// It's a shortcut.\n\tdm.engine = dm.config.engine.Implementation\n\ts.dmaps[name] = dm\n\treturn dm, nil\n}\n\n// getOrCreate is a shortcut function to create a new DMap or get an already initialized DMap instance.\nfunc (s *Service) getOrCreateDMap(name string) (*DMap, error) {\n\tdm, err := s.getDMap(name)\n\tif errors.Is(err, ErrDMapNotFound) {\n\t\treturn s.NewDMap(name)\n\t}\n\treturn dm, err\n}\n\nfunc (dm *DMap) getPartitionByHKey(hkey uint64, kind partitions.Kind) *partitions.Partition {\n\tvar part *partitions.Partition\n\tswitch {\n\tcase kind == partitions.PRIMARY:\n\t\tpart = dm.s.primary.PartitionByHKey(hkey)\n\tcase kind == partitions.BACKUP:\n\t\tpart = dm.s.backup.PartitionByHKey(hkey)\n\tdefault:\n\t\tpanic(\"unknown partition kind\")\n\t}\n\treturn part\n}\n\nfunc isKeyExpired(ttl int64) bool {\n\tif ttl == 0 {\n\t\treturn false\n\t}\n\n\t// convert nanoseconds to milliseconds\n\tres := (time.Now().UnixNano() / 1000000) >= ttl\n\tif res {\n\t\t// number of valid items removed from cache 
to free memory for new items.\n\t\tEvictedTotal.Increase(1)\n\t}\n\treturn res\n}\n"
  },
  {
    "path": "internal/dmap/dmap_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/olric-data/olric/internal/testcluster\"\n)\n\nfunc TestDMap_Name(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tdm, err := s.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"mydmap\", dm.Name())\n}\n"
  },
  {
    "path": "internal/dmap/env.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"context\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n)\n\ntype env struct {\n\tctx       context.Context\n\tputConfig *PutConfig\n\thkey      uint64\n\ttimestamp int64\n\tdmap      string\n\tkey       string\n\tvalue     []byte\n\ttimeout   time.Duration\n\tkind      partitions.Kind\n\tfragment  *fragment\n}\n\nfunc newEnv(ctx context.Context) *env {\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\treturn &env{\n\t\tctx:       ctx,\n\t\tputConfig: &PutConfig{},\n\t\ttimestamp: time.Now().UnixNano(),\n\t\tkind:      partitions.PRIMARY,\n\t}\n}\n"
  },
  {
    "path": "internal/dmap/eviction.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math/rand\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/pkg/storage\"\n\t\"golang.org/x/sync/semaphore\"\n)\n\n// isKeyIdleOnFragment is not a thread-safe function. It accesses underlying fragment for the given hkey.\nfunc (dm *DMap) isKeyIdleOnFragment(hkey uint64, f *fragment) bool {\n\tif dm.config == nil {\n\t\treturn false\n\t}\n\n\tif dm.config.maxIdleDuration.Nanoseconds() == 0 {\n\t\treturn false\n\t}\n\t// Maximum time in seconds for each entry to stay idle in the map.\n\t// It limits the lifetime of the entries relative to the time of the last\n\t// read or write access performed on them. 
The entries whose idle period\n\t// exceeds this limit are expired and evicted automatically.\n\tlastAccess, err := f.storage.GetLastAccess(hkey)\n\tif errors.Is(err, storage.ErrKeyNotFound) {\n\t\treturn false\n\t}\n\t//TODO: Handle other errors.\n\tttl := (dm.config.maxIdleDuration.Nanoseconds() + lastAccess) / 1000000\n\treturn isKeyExpired(ttl)\n}\n\nfunc (dm *DMap) isKeyIdle(hkey uint64) bool {\n\tpart := dm.getPartitionByHKey(hkey, partitions.PRIMARY)\n\tf, err := dm.loadFragment(part)\n\tif errors.Is(err, errFragmentNotFound) {\n\t\t// it's no possible to know whether the key is idle or not.\n\t\treturn false\n\t}\n\tif err != nil {\n\t\t// This could be a programming error and should never be happened on production systems.\n\t\tpanic(fmt.Sprintf(\"failed to get primary partition for: %d: %v\", hkey, err))\n\t}\n\tf.Lock()\n\tdefer f.Unlock()\n\treturn dm.isKeyIdleOnFragment(hkey, f)\n}\n\nfunc (s *Service) evictKeysAtBackground() {\n\tdefer s.wg.Done()\n\n\tnum := int64(runtime.NumCPU())\n\tif s.config.DMaps != nil && s.config.DMaps.NumEvictionWorkers != 0 {\n\t\tnum = s.config.DMaps.NumEvictionWorkers\n\t}\n\tsem := semaphore.NewWeighted(num)\n\tfor {\n\t\tif !s.isAlive() {\n\t\t\treturn\n\t\t}\n\n\t\tif err := sem.Acquire(s.ctx, 1); err != nil {\n\t\t\ts.log.V(3).Printf(\"[ERROR] Failed to acquire semaphore: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\ts.wg.Add(1)\n\t\tgo func() {\n\t\t\tdefer s.wg.Done()\n\t\t\tdefer sem.Release(1)\n\t\t\t// Good for developing tests.\n\t\t\ts.evictKeys()\n\t\t\tselect {\n\t\t\tcase <-time.After(100 * time.Millisecond):\n\t\t\tcase <-s.ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc (s *Service) evictKeys() {\n\tpartID := uint64(rand.Intn(int(s.config.PartitionCount)))\n\tpart := s.primary.PartitionByID(partID)\n\tpart.Map().Range(func(name, tmp interface{}) bool {\n\t\tdmapName := strings.TrimPrefix(name.(string), \"dmap.\")\n\t\tf := tmp.(*fragment)\n\t\ts.scanFragmentForEviction(partID, dmapName, f)\n\t\t// 
this breaks the loop, we only scan one dmap instance per call\n\t\treturn false\n\t})\n}\n\nfunc (s *Service) scanFragmentForEviction(partID uint64, name string, f *fragment) {\n\t/*\n\t\tFrom Redis Docs:\n\t\t\t1- Test 20 random keys from the set of keys with an associated expire.\n\t\t\t2- Delete all the keys found expired.\n\t\t\t3- If more than 25% of keys were expired, start again from step 1.\n\t*/\n\n\t// We need limits to prevent CPU starvation. deleteOnCluster does some network operation\n\t// to delete keys from the backup nodes and the previous owners.\n\tvar maxKeyCount = 20\n\tvar maxTotalCount = 100\n\tvar totalCount = 0\n\n\tdm, err := s.getOrCreateDMap(name)\n\tif err != nil {\n\t\ts.log.V(3).Printf(\"[ERROR] Failed to load DMap: %s: %v\", name, err)\n\t\treturn\n\t}\n\n\tjanitor := func() bool {\n\t\tif totalCount > maxTotalCount {\n\t\t\t// Release the lock. Eviction will be triggered again.\n\t\t\treturn false\n\t\t}\n\t\tf.Lock()\n\t\tdefer f.Unlock()\n\t\tcount, keyCount := 0, 0\n\t\tf.storage.RangeHKey(func(hkey uint64) bool {\n\t\t\tkeyCount++\n\t\t\tif keyCount >= maxKeyCount {\n\t\t\t\t// this means 'break'.\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tttl, err := f.storage.GetTTL(hkey)\n\t\t\tif err != nil {\n\t\t\t\tdm.s.log.V(3).Printf(\"[ERROR] Failed to get TTL for: %d\", hkey)\n\t\t\t\treturn true // continue\n\t\t\t}\n\t\t\tkey, err := f.storage.GetKey(hkey)\n\t\t\tif err != nil {\n\t\t\t\tdm.s.log.V(3).Printf(\"[ERROR] Failed to get key for: %d\", hkey)\n\t\t\t\treturn true // continue\n\t\t\t}\n\n\t\t\tif isKeyExpired(ttl) || dm.isKeyIdleOnFragment(hkey, f) {\n\t\t\t\terr = dm.deleteOnCluster(hkey, key, f)\n\t\t\t\tif err != nil {\n\t\t\t\t\t// It will be tried again.\n\t\t\t\t\tdm.s.log.V(3).Printf(\"[ERROR] Failed to delete expired key: %s on DMap: %s: %v\",\n\t\t\t\t\t\tkey, dm.name, err)\n\t\t\t\t\treturn true\n\t\t\t\t}\n\n\t\t\t\t// number of valid items removed from cache to free memory for new 
items.\n\t\t\t\tEvictedTotal.Increase(1)\n\t\t\t}\n\t\t\treturn true\n\t\t})\n\n\t\ttotalCount += count\n\t\treturn count >= maxKeyCount/4\n\t}\n\n\tdefer func() {\n\t\tif totalCount > 0 {\n\t\t\tif s.log.V(6).Ok() {\n\t\t\t\ts.log.V(6).Printf(\"[DEBUG] Evicted key count is %d on PartID: %d\", totalCount, partID)\n\t\t\t}\n\t\t}\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase <-f.ctx.Done():\n\t\t\t// the fragment is closed.\n\t\t\treturn\n\t\tcase <-s.ctx.Done():\n\t\t\t// The server has gone.\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\t\t// Call janitorWorker again until it returns false.\n\t\tif !janitor() {\n\t\t\treturn\n\t\t}\n\t}\n}\n\ntype lruItem struct {\n\tHKey       uint64\n\tLastAccess int64\n}\n\nfunc (dm *DMap) evictKeyWithLRU(e *env) error {\n\tvar idx = 1\n\tvar items []lruItem\n\n\t// Warning: fragment is already locked by DMap.Put. Be sure about that before editing this function.\n\n\t// Pick random items from the distributed map and sort them by accessedAt.\n\te.fragment.storage.Range(func(hkey uint64, e storage.Entry) bool {\n\t\tif idx >= dm.config.lruSamples {\n\t\t\treturn false\n\t\t}\n\t\tidx++\n\t\ti := lruItem{\n\t\t\tHKey:       hkey,\n\t\t\tLastAccess: e.LastAccess(),\n\t\t}\n\t\titems = append(items, i)\n\t\treturn true\n\t})\n\n\tif len(items) == 0 {\n\t\treturn fmt.Errorf(\"nothing found to expire with LRU\")\n\t}\n\n\tsort.Slice(items, func(i, j int) bool { return items[i].LastAccess < items[j].LastAccess })\n\t// Pick the first item to delete. 
It's the least recently used item in the sample.\n\titem := items[0]\n\tkey, err := e.fragment.storage.GetKey(item.HKey)\n\tif err != nil {\n\t\tif errors.Is(err, storage.ErrKeyNotFound) {\n\t\t\terr = ErrKeyNotFound\n\t\t\tGetMisses.Increase(1)\n\t\t}\n\t\treturn err\n\t}\n\t// Here we have a key/value pair to evict for making room for a new pair.\n\tif dm.s.log.V(6).Ok() {\n\t\tdm.s.log.V(6).Printf(\"[DEBUG] Evicted item on DMap: %s, key: %s with LRU\", e.dmap, key)\n\t}\n\terr = dm.deleteOnCluster(item.HKey, key, e.fragment)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// number of valid items removed from cache to free memory for new items.\n\tEvictedTotal.Increase(1)\n\treturn nil\n}\n"
  },
  {
    "path": "internal/dmap/eviction_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"context\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/config\"\n\t\"github.com/olric-data/olric/internal/testcluster\"\n\t\"github.com/olric-data/olric/internal/testutil\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestDMap_Eviction_TTL(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts1 := cluster.AddMember(nil).(*Service)\n\ts2 := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tdm, err := s1.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tpc := &PutConfig{\n\t\tHasEX: true,\n\t\tEX:    time.Millisecond,\n\t}\n\tfor i := 0; i < 100; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), pc)\n\t\trequire.NoError(t, err)\n\t}\n\n\t<-time.After(time.Millisecond)\n\tfor i := 0; i < 100; i++ {\n\t\ts1.evictKeys()\n\t\ts2.evictKeys()\n\t}\n\n\tlength := 0\n\tfor _, ins := range []*Service{s1, s2} {\n\t\tfor partID := uint64(0); partID < s1.config.PartitionCount; partID++ {\n\t\t\tpart := ins.primary.PartitionByID(partID)\n\t\t\tpart.Map().Range(func(k, v interface{}) bool {\n\t\t\t\tf := v.(*fragment)\n\t\t\t\tlength += f.storage.Stats().Length\n\t\t\t\treturn true\n\t\t\t})\n\t\t}\n\t}\n\trequire.NotEqual(t, 100, length)\n}\n\nfunc TestDMap_Eviction_Config_TTLDuration(t *testing.T) {\n\tcluster := 
testcluster.New(NewService)\n\tc := testutil.NewConfig()\n\tc.DMaps = &config.DMaps{\n\t\tTTLDuration: time.Duration(0.1 * float64(time.Second)),\n\t\tEngine:      config.NewEngine(),\n\t}\n\trequire.NoError(t, c.DMaps.Engine.Sanitize())\n\n\te := testcluster.NewEnvironment(c)\n\ts := cluster.AddMember(e).(*Service)\n\tdefer cluster.Shutdown()\n\n\tdm, err := s.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tfor i := 0; i < 100; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), nil)\n\t\trequire.NoError(t, err)\n\t}\n\n\t<-time.After(200 * time.Millisecond)\n\tfor i := 0; i < 100; i++ {\n\t\ts.evictKeys()\n\t}\n\n\tlength := 0\n\tfor partID := uint64(0); partID < s.config.PartitionCount; partID++ {\n\t\tpart := s.primary.PartitionByID(partID)\n\t\tpart.Map().Range(func(k, v interface{}) bool {\n\t\t\tf := v.(*fragment)\n\t\t\tlength += f.storage.Stats().Length\n\t\t\treturn true\n\t\t})\n\t}\n\trequire.NotEqual(t, 100, length)\n}\n\nfunc TestDMap_Eviction_Config_MaxIdleDuration(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\tc := testutil.NewConfig()\n\tc.DMaps = &config.DMaps{\n\t\tMaxIdleDuration: 100 * time.Millisecond,\n\t\tEngine:          config.NewEngine(),\n\t}\n\trequire.NoError(t, c.DMaps.Engine.Sanitize())\n\n\te := testcluster.NewEnvironment(c)\n\ts := cluster.AddMember(e).(*Service)\n\tdefer cluster.Shutdown()\n\n\tdm, err := s.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tfor i := 0; i < 100; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), nil)\n\t\trequire.NoError(t, err)\n\t}\n\n\t<-time.After(150 * time.Millisecond)\n\tfor i := 0; i < 100; i++ {\n\t\ts.evictKeys()\n\t}\n\n\tlength := 0\n\tfor partID := uint64(0); partID < s.config.PartitionCount; partID++ {\n\t\tpart := s.primary.PartitionByID(partID)\n\t\tpart.Map().Range(func(k, v interface{}) bool {\n\t\t\tf := v.(*fragment)\n\t\t\tlength += 
f.storage.Stats().Length\n\t\t\treturn true\n\t\t})\n\t}\n\n\trequire.NotEqual(t, 100, length)\n}\n\nfunc TestDMap_Eviction_LRU_Config_MaxKeys(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\tc := testutil.NewConfig()\n\tc.DMaps = &config.DMaps{\n\t\tMaxKeys:        70,\n\t\tEvictionPolicy: config.LRUEviction,\n\t\tEngine:         config.NewEngine(),\n\t}\n\trequire.NoError(t, c.DMaps.Engine.Sanitize())\n\n\te := testcluster.NewEnvironment(c)\n\ts := cluster.AddMember(e).(*Service)\n\tdefer cluster.Shutdown()\n\n\tdm, err := s.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tfor i := 0; i < 100; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), nil)\n\t\trequire.NoError(t, err)\n\t}\n\tlength := 0\n\tfor partID := uint64(0); partID < s.config.PartitionCount; partID++ {\n\t\tpart := s.primary.PartitionByID(partID)\n\t\tpart.Map().Range(func(k, v interface{}) bool {\n\t\t\tf := v.(*fragment)\n\t\t\tlength += f.storage.Stats().Length\n\t\t\treturn true\n\t\t})\n\t}\n\n\trequire.NotEqual(t, 100, length)\n}\n\nfunc TestDMap_Eviction_LRU_Config_MaxInuse(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\tc := testutil.NewConfig()\n\tc.DMaps = &config.DMaps{\n\t\tMaxInuse:       2048,\n\t\tEvictionPolicy: config.LRUEviction,\n\t\tEngine:         testutil.NewEngineConfig(t),\n\t}\n\n\te := testcluster.NewEnvironment(c)\n\ts := cluster.AddMember(e).(*Service)\n\tdefer cluster.Shutdown()\n\n\tdm, err := s.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tfor i := 0; i < 100; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), nil)\n\t\trequire.NoError(t, err)\n\t}\n\tlength := 0\n\tfor partID := uint64(0); partID < s.config.PartitionCount; partID++ {\n\t\tpart := s.primary.PartitionByID(partID)\n\t\tpart.Map().Range(func(k, v interface{}) bool {\n\t\t\tf := v.(*fragment)\n\t\t\tlength += f.storage.Stats().Length\n\t\t\treturn 
true\n\t\t})\n\t}\n\n\trequire.NotEqual(t, 100, length)\n}\n"
  },
  {
    "path": "internal/dmap/expire.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"context\"\n\t\"time\"\n)\n\n// Expire updates the expiry for the given key. It returns ErrKeyNotFound if the\n// DB does not contain the key. It's thread-safe.\nfunc (dm *DMap) Expire(ctx context.Context, key string, timeout time.Duration) error {\n\tpc := &PutConfig{\n\t\tOnlyUpdateTTL: true,\n\t}\n\te := newEnv(ctx)\n\te.putConfig = pc\n\te.dmap = dm.name\n\te.key = key\n\te.timeout = timeout\n\treturn dm.put(e)\n}\n"
  },
  {
    "path": "internal/dmap/expire_handlers.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/tidwall/redcon\"\n)\n\nfunc (s *Service) expireCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\texpireCmd, err := protocol.ParseExpireCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tdm, err := s.getOrCreateDMap(expireCmd.DMap)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tpc := &PutConfig{\n\t\tOnlyUpdateTTL: true,\n\t}\n\n\te := newEnv(s.ctx)\n\te.putConfig = pc\n\te.dmap = expireCmd.DMap\n\te.key = expireCmd.Key\n\te.timeout = expireCmd.Seconds\n\terr = dm.put(e)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\tconn.WriteString(protocol.StatusOK)\n}\n\nfunc (s *Service) pexpireCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tpexpireCmd, err := protocol.ParsePExpireCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tdm, err := s.getOrCreateDMap(pexpireCmd.DMap)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tpc := &PutConfig{\n\t\tOnlyUpdateTTL: true,\n\t}\n\n\te := newEnv(s.ctx)\n\te.putConfig = pc\n\te.dmap = pexpireCmd.DMap\n\te.key = pexpireCmd.Key\n\te.timeout = pexpireCmd.Milliseconds\n\terr = dm.put(e)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, 
err)\n\t\treturn\n\t}\n\tconn.WriteString(protocol.StatusOK)\n}\n"
  },
  {
    "path": "internal/dmap/expire_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"context\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/olric-data/olric/internal/testcluster\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestDMap_Expire(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\tdm, err := s.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tkey := \"mykey\"\n\terr = dm.Put(ctx, key, \"myvalue\", nil)\n\trequire.NoError(t, err)\n\n\t_, err = dm.Get(ctx, key)\n\trequire.NoError(t, err)\n\n\terr = dm.Expire(ctx, key, time.Millisecond)\n\trequire.NoError(t, err)\n\n\t<-time.After(time.Millisecond)\n\n\t// Get the value and check it.\n\t_, err = dm.Get(ctx, key)\n\trequire.ErrorIs(t, err, ErrKeyNotFound)\n}\n\nfunc TestDMap_Expire_ErrKeyNotFound(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tdm, err := s.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\terr = dm.Expire(context.Background(), \"mykey\", time.Millisecond)\n\trequire.ErrorIs(t, err, ErrKeyNotFound)\n}\n\nfunc TestDMap_Expire_expireCommandHandler(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer 
cluster.Shutdown()\n\n\tctx := context.Background()\n\tdm, err := s.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tkey := \"mykey\"\n\terr = dm.Put(ctx, key, \"myvalue\", nil)\n\trequire.NoError(t, err)\n\n\tcmd := protocol.NewExpire(\"mydmap\", \"mykey\", time.Duration(0.1*float64(time.Second))).Command(s.ctx)\n\trc := s.client.Get(s.rt.This().String())\n\terr = rc.Process(ctx, cmd)\n\trequire.NoError(t, err)\n\n\t<-time.After(200 * time.Millisecond)\n\n\t// Get the value and check it.\n\t_, err = dm.Get(ctx, key)\n\trequire.ErrorIs(t, err, ErrKeyNotFound)\n}\n\nfunc TestDMap_Expire_pexpireCommandHandler(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\tdm, err := s.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tkey := \"mykey\"\n\terr = dm.Put(ctx, key, \"myvalue\", nil)\n\trequire.NoError(t, err)\n\n\tcmd := protocol.NewPExpire(\"mydmap\", \"mykey\", time.Millisecond).Command(s.ctx)\n\trc := s.client.Get(s.rt.This().String())\n\terr = rc.Process(ctx, cmd)\n\trequire.NoError(t, err)\n\n\t<-time.After(10 * time.Millisecond)\n\n\t// Get the value and check it.\n\t_, err = dm.Get(ctx, key)\n\trequire.ErrorIs(t, err, ErrKeyNotFound)\n}\n"
  },
  {
    "path": "internal/dmap/fragment.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/events\"\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/discovery\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/olric-data/olric/pkg/storage\"\n\t\"github.com/vmihailenco/msgpack/v5\"\n)\n\ntype fragment struct {\n\tsync.RWMutex\n\n\tservice *Service\n\tstorage storage.Engine\n\tctx     context.Context\n\tcancel  context.CancelFunc\n}\n\nfunc (f *fragment) Stats() storage.Stats {\n\tf.RLock()\n\tdefer f.RUnlock()\n\n\treturn f.storage.Stats()\n}\n\nfunc (f *fragment) Compaction() (bool, error) {\n\tselect {\n\tcase <-f.ctx.Done():\n\t\t// fragment is closed or destroyed\n\t\treturn false, nil\n\tdefault:\n\t}\n\treturn f.storage.Compaction()\n}\n\nfunc (f *fragment) Destroy() error {\n\tselect {\n\tcase <-f.ctx.Done():\n\t\treturn f.storage.Destroy()\n\tdefault:\n\t}\n\treturn errors.New(\"fragment is not closed\")\n}\n\nfunc (f *fragment) Close() error {\n\tdefer f.cancel()\n\treturn f.storage.Close()\n}\n\nfunc (f *fragment) Name() string {\n\treturn \"DMap\"\n}\n\nfunc (f *fragment) Move(part *partitions.Partition, name string, owners []discovery.Member) error {\n\tf.Lock()\n\tdefer f.Unlock()\n\n\ti := 
f.storage.TransferIterator()\n\tif !i.Next() {\n\t\treturn nil\n\t}\n\n\tpayload, index, err := i.Export()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfp := &fragmentPack{\n\t\tPartID:  part.ID(),\n\t\tKind:    part.Kind(),\n\t\tName:    strings.TrimPrefix(name, \"dmap.\"),\n\t\tPayload: payload,\n\t}\n\tvalue, err := msgpack.Marshal(fp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, owner := range owners {\n\t\tif f.service.config.EnableClusterEventsChannel {\n\t\t\te := &events.FragmentMigrationEvent{\n\t\t\t\tKind:          events.KindFragmentMigrationEvent,\n\t\t\t\tSource:        f.service.rt.This().String(),\n\t\t\t\tTarget:        owner.String(),\n\t\t\t\tDataStructure: \"dmap\",\n\t\t\t\tPartitionID:   part.ID(),\n\t\t\t\tIdentifier:    fp.Name,\n\t\t\t\tLength:        len(value),\n\t\t\t\tIsBackup:      part.Kind() == partitions.BACKUP,\n\t\t\t\tTimestamp:     time.Now().UnixNano(),\n\t\t\t}\n\t\t\tf.service.wg.Add(1)\n\t\t\tgo f.service.publishEvent(e)\n\t\t}\n\n\t\tcmd := protocol.NewMoveFragment(value).Command(f.service.ctx)\n\t\trc := f.service.client.Get(owner.String())\n\t\terr = rc.Process(f.service.ctx, cmd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := cmd.Err(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn i.Drop(index)\n}\n\nfunc (dm *DMap) newFragment() (*fragment, error) {\n\tc := storage.NewConfig(dm.config.engine.Config)\n\tengine, err := dm.engine.Fork(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tengine.SetLogger(dm.s.config.Logger)\n\terr = engine.Start()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\treturn &fragment{\n\t\tservice: dm.s,\n\t\tstorage: engine,\n\t\tctx:     ctx,\n\t\tcancel:  cancel,\n\t}, nil\n}\n\nfunc (dm *DMap) loadOrCreateFragment(part *partitions.Partition) (*fragment, error) {\n\tpart.Lock()\n\tdefer part.Unlock()\n\n\t// Critical section here. 
It should be protected by a lock.\n\tfg, ok := part.Map().Load(dm.fragmentName)\n\tif ok {\n\t\t// We already have the fragment.\n\t\treturn fg.(*fragment), nil\n\t}\n\n\tf, err := dm.newFragment()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpart.Map().Store(dm.fragmentName, f)\n\treturn f, nil\n}\n\nfunc (dm *DMap) loadFragment(part *partitions.Partition) (*fragment, error) {\n\tf, ok := part.Map().Load(dm.fragmentName)\n\tif !ok {\n\t\treturn nil, errFragmentNotFound\n\t}\n\treturn f.(*fragment), nil\n}\n\nvar _ partitions.Fragment = (*fragment)(nil)\n"
  },
  {
    "path": "internal/dmap/fragment_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"errors\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/testcluster\"\n\t\"github.com/olric-data/olric/internal/testutil\"\n)\n\nfunc TestDMap_Fragment(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdm, err := s.NewDMap(\"mydmap\")\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\tt.Run(\"loadFragment\", func(t *testing.T) {\n\t\tpart := s.primary.PartitionByID(1)\n\t\t_, err = dm.loadFragment(part)\n\t\tif !errors.Is(err, errFragmentNotFound) {\n\t\t\tt.Fatalf(\"Expected %v. Got: %v\", errFragmentNotFound, err)\n\t\t}\n\t})\n\n\tt.Run(\"newFragment\", func(t *testing.T) {\n\t\t_, err := dm.newFragment()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t\t}\n\t})\n\n\tt.Run(\"loadFragment -- errFragmentNotFound\", func(t *testing.T) {\n\t\tpart := dm.getPartitionByHKey(123, partitions.PRIMARY)\n\t\t_, err := dm.loadFragment(part)\n\t\tif !errors.Is(err, errFragmentNotFound) {\n\t\t\tt.Fatalf(\"Expected %v. 
Got: %v\", errFragmentNotFound, err)\n\t\t}\n\t})\n\n\tt.Run(\"loadOrCreateFragment\", func(t *testing.T) {\n\t\tpart := dm.getPartitionByHKey(123, partitions.PRIMARY)\n\t\t_, err = dm.loadOrCreateFragment(part)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t\t}\n\n\t\t_, err := dm.loadFragment(part)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t\t}\n\t})\n}\n\nfunc TestDMap_Fragment_Concurrent_Access(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdm, err := s.NewDMap(\"mydmap\")\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\tpart := dm.getPartitionByHKey(123, partitions.PRIMARY)\n\n\tvar mtx sync.RWMutex\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < 1000; i++ {\n\t\twg.Add(1)\n\t\tgo func(idx int) {\n\t\t\tdefer wg.Done()\n\n\t\t\tf, err := dm.loadOrCreateFragment(part)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"Expected nil. Got: %v\", err)\n\t\t\t}\n\n\t\t\te := f.storage.NewEntry()\n\t\t\te.SetKey(testutil.ToKey(idx))\n\n\t\t\tmtx.Lock()\n\t\t\t// storage engine is not thread-safe\n\t\t\terr = f.storage.Put(uint64(idx), e)\n\t\t\tmtx.Unlock()\n\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"Expected nil. Got: %v\", err)\n\t\t\t}\n\t\t}(i)\n\t}\n\n\twg.Wait()\n\n\tf, err := dm.loadFragment(part)\n\tif err != nil {\n\t\tt.Errorf(\"Expected nil. Got: %v\", err)\n\t}\n\tfor i := 0; i < 1000; i++ {\n\t\tentry, err := f.storage.Get(uint64(i))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t\t}\n\t\tif entry.Key() != testutil.ToKey(i) {\n\t\t\tt.Fatalf(\"Expected key: %s. Got: %s\", testutil.ToKey(i), entry.Key())\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "internal/dmap/get.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"sort\"\n\t\"sync\"\n\n\t\"github.com/olric-data/olric/config\"\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/discovery\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/olric-data/olric/internal/stats\"\n\t\"github.com/olric-data/olric/pkg/storage\"\n)\n\n// Entry is a DMap entry with its metadata.\ntype Entry struct {\n\tKey       string\n\tValue     interface{}\n\tTTL       int64\n\tTimestamp int64\n}\n\nvar (\n\t// GetMisses is the number of entries that have been requested and not found\n\tGetMisses = stats.NewInt64Counter()\n\n\t// GetHits is the number of entries that have been requested and found present\n\tGetHits = stats.NewInt64Counter()\n\n\t// EvictedTotal is the number of entries removed from cache to free memory for new entries.\n\tEvictedTotal = stats.NewInt64Counter()\n)\n\n// ErrReadQuorum means that read quorum cannot be reached to operate.\nvar ErrReadQuorum = errors.New(\"read quorum cannot be reached\")\n\ntype version struct {\n\thost  *discovery.Member\n\tentry storage.Entry\n}\n\n// getOnFragment retrieves an entry from the associated fragment based on the provided environment details.\n// It returns the found entry or an error if the key is not found, too large, or expired.\nfunc 
(dm *DMap) getOnFragment(e *env) (storage.Entry, error) {\n\tpart := dm.getPartitionByHKey(e.hkey, e.kind)\n\tf, err := dm.loadFragment(part)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tf.RLock()\n\tdefer f.RUnlock()\n\n\tentry, err := f.storage.Get(e.hkey)\n\tswitch err {\n\tcase storage.ErrKeyNotFound:\n\t\terr = ErrKeyNotFound\n\tcase storage.ErrKeyTooLarge:\n\t\terr = ErrKeyTooLarge\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif isKeyExpired(entry.TTL()) {\n\t\treturn nil, ErrKeyNotFound\n\t}\n\treturn entry, nil\n}\n\n// lookupOnPreviousOwner retrieves the version of a key from a previous owner in the cluster.\n// It communicates with the specified owner node and decodes the value into a version object.\nfunc (dm *DMap) lookupOnPreviousOwner(owner *discovery.Member, key string) (*version, error) {\n\tcmd := protocol.NewGetEntry(dm.name, key).Command(dm.s.ctx)\n\trc := dm.s.client.Get(owner.String())\n\terr := rc.Process(dm.s.ctx, cmd)\n\tif err != nil {\n\t\treturn nil, protocol.ConvertError(err)\n\t}\n\tvalue, err := cmd.Bytes()\n\tif err != nil {\n\t\treturn nil, protocol.ConvertError(err)\n\t}\n\n\tv := &version{host: owner}\n\te := dm.engine.NewEntry()\n\te.Decode(value)\n\tv.entry = e\n\treturn v, nil\n}\n\nfunc (dm *DMap) valueToVersion(value storage.Entry) *version {\n\tthis := dm.s.rt.This()\n\treturn &version{\n\t\thost:  &this,\n\t\tentry: value,\n\t}\n}\n\n// lookupOnThisNode searches for a key's version on the current node, considering\n// only the primary partition owner.\nfunc (dm *DMap) lookupOnThisNode(hkey uint64, key string) *version {\n\t// Check on localhost, the partition owner.\n\tpart := dm.getPartitionByHKey(hkey, partitions.PRIMARY)\n\tf, err := dm.loadFragment(part)\n\tif err != nil {\n\t\tif !errors.Is(err, errFragmentNotFound) {\n\t\t\tdm.s.log.V(3).Printf(\"[ERROR] Failed to get DMap fragment: %v\", err)\n\t\t}\n\t\treturn dm.valueToVersion(nil)\n\t}\n\tf.RLock()\n\tdefer f.RUnlock()\n\n\tvalue, err := 
f.storage.Get(hkey)\n\tif err != nil {\n\t\tif !errors.Is(err, storage.ErrKeyNotFound) {\n\t\t\t// still need to use \"ver\". just log this error.\n\t\t\tdm.s.log.V(3).Printf(\"[ERROR] Failed to get key: %s on %s: %s\", key, dm.name, err)\n\t\t}\n\t\treturn dm.valueToVersion(nil)\n\t}\n\t// We found the key\n\t//\n\t// LRU and MaxIdleDuration eviction policies are only valid on\n\t// the partition owner. Normally, we shouldn't need to retrieve the keys\n\t// from the backup or the previous owners. When the fsck merge\n\t// a fragmented partition or recover keys from a backup, Olric\n\t// continue maintaining a reliable access log.\n\treturn dm.valueToVersion(value)\n}\n\n// lookupOnOwners collects versions of a key/value pair on the partition owner\n// by including previous partition owners.\nfunc (dm *DMap) lookupOnOwners(hkey uint64, key string) []*version {\n\towners := dm.s.primary.PartitionOwnersByHKey(hkey)\n\tif len(owners) == 0 {\n\t\tpanic(\"partition owners list cannot be empty\")\n\t}\n\n\tvar (\n\t\twg       sync.WaitGroup\n\t\tmtx      sync.Mutex\n\t\tversions []*version\n\t)\n\tversions = append(versions, dm.lookupOnThisNode(hkey, key))\n\n\t// Run a query on the previous owners.\n\t// Traverse in reverse order. Except from the latest host, this one.\n\tfor i := len(owners) - 2; i >= 0; i-- {\n\t\towner := owners[i]\n\n\t\twg.Add(1)\n\t\tgo func(member *discovery.Member) {\n\t\t\tdefer wg.Done()\n\n\t\t\tv, err := dm.lookupOnPreviousOwner(member, key)\n\t\t\tif err != nil {\n\t\t\t\tif dm.s.log.V(6).Ok() {\n\t\t\t\t\tdm.s.log.V(6).Printf(\"[ERROR] Failed to call get on a previous \"+\n\t\t\t\t\t\t\"primary owner: %s: %v\", member, err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tmtx.Lock()\n\t\t\t// Ignore failed owners. 
The balancer will wipe out\n\t\t\t// the data on those hosts.\n\t\t\tversions = append(versions, v)\n\t\t\tmtx.Unlock()\n\t\t}(&owner)\n\t}\n\n\twg.Wait()\n\treturn versions\n}\n\nfunc (dm *DMap) sortVersions(versions []*version) []*version {\n\tsort.Slice(versions,\n\t\tfunc(i, j int) bool {\n\t\t\treturn versions[i].entry.Timestamp() >= versions[j].entry.Timestamp()\n\t\t},\n\t)\n\t// Explicit is better than implicit.\n\treturn versions\n}\n\n// sanitizeAndSortVersions removes nil versions from the input slice and sorts\n// the remaining versions by recency.\nfunc (dm *DMap) sanitizeAndSortVersions(versions []*version) []*version {\n\tvar sanitized []*version\n\t// We use versions slice for read-repair. Clear nil values first.\n\tfor _, ver := range versions {\n\t\tif ver.entry != nil {\n\t\t\tsanitized = append(sanitized, ver)\n\t\t}\n\t}\n\tif len(sanitized) <= 1 {\n\t\treturn sanitized\n\t}\n\treturn dm.sortVersions(sanitized)\n}\n\n// lookupOnReplicas retrieves data from replica nodes for the given hash key and\n// key, returning a list of versioned entries.\nfunc (dm *DMap) lookupOnReplicas(hkey uint64, key string) []*version {\n\t// Check replicas\n\tvar (\n\t\twg  sync.WaitGroup\n\t\tmtx sync.Mutex\n\t)\n\n\treplicas := dm.s.backup.PartitionOwnersByHKey(hkey)\n\tversions := make([]*version, 0, len(replicas))\n\tfor _, replica := range replicas {\n\t\twg.Add(1)\n\t\tgo func(host *discovery.Member) {\n\t\t\tdefer wg.Done()\n\n\t\t\tcmd := protocol.NewGetEntry(dm.name, key).SetReplica().Command(dm.s.ctx)\n\t\t\trc := dm.s.client.Get(host.String())\n\t\t\terr := rc.Process(dm.s.ctx, cmd)\n\t\t\terr = protocol.ConvertError(err)\n\t\t\tif err != nil {\n\t\t\t\tif dm.s.log.V(6).Ok() {\n\t\t\t\t\tdm.s.log.V(6).Printf(\"[DEBUG] Failed to call get on\"+\n\t\t\t\t\t\t\" a replica owner: %s: %v\", host, err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tvalue, err := cmd.Bytes()\n\t\t\terr = protocol.ConvertError(err)\n\t\t\tif err != nil {\n\t\t\t\tif 
dm.s.log.V(6).Ok() {\n\t\t\t\t\tdm.s.log.V(6).Printf(\"[DEBUG] Failed to call get on\"+\n\t\t\t\t\t\t\" a replica owner: %s: %v\", host, err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tv := &version{host: host}\n\t\t\te := dm.engine.NewEntry()\n\t\t\te.Decode(value)\n\t\t\tv.entry = e\n\n\t\t\tmtx.Lock()\n\t\t\tversions = append(versions, v)\n\t\t\tmtx.Unlock()\n\t\t}(&replica)\n\t}\n\twg.Wait()\n\treturn versions\n}\n\n// readRepair performs synchronization of inconsistent replicas by applying the\n// winning version to out-of-sync nodes.\nfunc (dm *DMap) readRepair(winner *version, versions []*version) {\n\tvar wg sync.WaitGroup\n\tfor _, value := range versions {\n\n\t\t// Check the timestamp first, we apply the \"last write wins\" rule here.\n\t\tif value.entry != nil && winner.entry.Timestamp() == value.entry.Timestamp() {\n\t\t\tcontinue\n\t\t}\n\n\t\twg.Add(1)\n\t\tgo func(v *version) {\n\t\t\tdefer wg.Done()\n\n\t\t\t// Sync\n\t\t\ttmp := *v.host\n\t\t\tif tmp.CompareByID(dm.s.rt.This()) {\n\t\t\t\thkey := partitions.HKey(dm.name, winner.entry.Key())\n\t\t\t\tpart := dm.getPartitionByHKey(hkey, partitions.PRIMARY)\n\t\t\t\tf, err := dm.loadOrCreateFragment(part)\n\t\t\t\tif err != nil {\n\t\t\t\t\tdm.s.log.V(3).Printf(\"[ERROR] Failed to get or create the fragment for: %s on %s: %v\",\n\t\t\t\t\t\twinner.entry.Key(), dm.name, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tf.Lock()\n\t\t\t\te := newEnv(context.Background())\n\t\t\t\te.hkey = hkey\n\t\t\t\te.fragment = f\n\t\t\t\terr = dm.putEntryOnFragment(e, winner.entry)\n\t\t\t\tif err != nil {\n\t\t\t\t\tdm.s.log.V(3).Printf(\"[ERROR] Failed to synchronize with replica: %v\", err)\n\t\t\t\t}\n\t\t\t\tf.Unlock()\n\t\t\t} else {\n\t\t\t\t// If readRepair is enabled, this function is called by every GET request.\n\t\t\t\tcmd := protocol.NewPutEntry(dm.name, winner.entry.Key(), winner.entry.Encode()).Command(dm.s.ctx)\n\t\t\t\trc := dm.s.client.Get(v.host.String())\n\t\t\t\terr := rc.Process(dm.s.ctx, 
cmd)\n\t\t\t\tif err != nil {\n\t\t\t\t\tdm.s.log.V(3).Printf(\"[ERROR] Failed to synchronize replica %s: %v\", v.host, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\terr = cmd.Err()\n\t\t\t\tif err != nil {\n\t\t\t\t\tdm.s.log.V(3).Printf(\"[ERROR] Failed to synchronize replica %s: %v\", v.host, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}(value)\n\t}\n\twg.Wait()\n}\n\n// getOnCluster retrieves the storage.Entry for a given hashed key and key string\n// from cluster nodes with read quorum. It ensures data consistency via read repair\n// and returns ErrKeyNotFound or ErrReadQuorum if conditions aren't met.\nfunc (dm *DMap) getOnCluster(hkey uint64, key string) (storage.Entry, error) {\n\t// RUnlock should not be called with a defer statement here because\n\t//  the readRepair function may call putOnFragment function which needs a write\n\t// lock. Please remember calling RUnlock before returning here.\n\tversions := dm.lookupOnOwners(hkey, key)\n\tif dm.s.config.ReadQuorum >= config.MinimumReplicaCount {\n\t\tv := dm.lookupOnReplicas(hkey, key)\n\t\tversions = append(versions, v...)\n\t}\n\n\tif len(versions) < dm.s.config.ReadQuorum {\n\t\treturn nil, ErrReadQuorum\n\t}\n\n\tsorted := dm.sanitizeAndSortVersions(versions)\n\tif len(sorted) == 0 {\n\t\t// We checked everywhere, it's not here.\n\t\treturn nil, ErrKeyNotFound\n\t}\n\n\tif len(sorted) < dm.s.config.ReadQuorum {\n\t\treturn nil, ErrReadQuorum\n\t}\n\n\t// The most up-to-date version of the values.\n\twinner := sorted[0]\n\tif isKeyExpired(winner.entry.TTL()) || dm.isKeyIdle(hkey) {\n\t\treturn nil, ErrKeyNotFound\n\t}\n\n\tif dm.s.config.ReadRepair {\n\t\t// Parallel read operations may propagate different versions of\n\t\t// the same key/value pair. The rule is simple: last write wins.\n\t\tdm.readRepair(winner, versions)\n\t}\n\treturn winner.entry, nil\n}\n\n// Get gets the value for the given key. It returns ErrKeyNotFound if the DB\n// does not contain the key. It's thread-safe. 
It is safe to modify the contents\n// of the returned value.\nfunc (dm *DMap) Get(ctx context.Context, key string) (storage.Entry, error) {\n\thkey := partitions.HKey(dm.name, key)\n\tmember := dm.s.primary.PartitionByHKey(hkey).Owner()\n\n\t// We are on the partition owner\n\tif member.CompareByName(dm.s.rt.This()) {\n\t\tentry, err := dm.getOnCluster(hkey, key)\n\t\tif errors.Is(err, ErrKeyNotFound) {\n\t\t\tGetMisses.Increase(1)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// number of keys that have been requested and found present\n\t\tGetHits.Increase(1)\n\n\t\treturn entry, nil\n\t}\n\n\t// Redirect to the partition owner\n\tcmd := protocol.NewGet(dm.name, key).SetRaw().Command(dm.s.ctx)\n\trc := dm.s.client.Get(member.String())\n\terr := rc.Process(ctx, cmd)\n\tif err != nil {\n\t\treturn nil, protocol.ConvertError(err)\n\t}\n\n\tvalue, err := cmd.Bytes()\n\tif err != nil {\n\t\treturn nil, protocol.ConvertError(err)\n\t}\n\n\t// number of keys that have been requested and found present\n\tGetHits.Increase(1)\n\n\tentry := dm.engine.NewEntry()\n\tentry.Decode(value)\n\treturn entry, nil\n}\n"
  },
  {
    "path": "internal/dmap/get_handlers.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/tidwall/redcon\"\n)\n\nfunc (s *Service) getCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tgetCmd, err := protocol.ParseGetCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\tdm, err := s.getOrCreateDMap(getCmd.DMap)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\traw, err := dm.Get(s.ctx, getCmd.Key)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tif getCmd.Raw {\n\t\tconn.WriteBulk(raw.Encode())\n\t\treturn\n\t}\n\tconn.WriteBulk(raw.Value())\n}\n\nfunc (s *Service) getEntryCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tgetEntryCmd, err := protocol.ParseGetEntryCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\tdm, err := s.getOrCreateDMap(getEntryCmd.DMap)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tvar kind = partitions.PRIMARY\n\tif getEntryCmd.Replica {\n\t\tkind = partitions.BACKUP\n\t}\n\n\te := newEnv(s.ctx)\n\te.dmap = getEntryCmd.DMap\n\te.key = getEntryCmd.Key\n\te.hkey = partitions.HKey(getEntryCmd.DMap, getEntryCmd.Key)\n\te.kind = kind\n\tnt, err := dm.getOnFragment(e)\n\tif err == 
errFragmentNotFound {\n\t\terr = ErrKeyNotFound\n\t}\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\t// We found it.\n\tconn.WriteBulk(nt.Encode())\n}\n"
  },
  {
    "path": "internal/dmap/get_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"context\"\n\t\"testing\"\n\n\t\"github.com/olric-data/olric/internal/cluster/routingtable\"\n\t\"github.com/olric-data/olric/internal/testcluster\"\n\t\"github.com/olric-data/olric/internal/testutil\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestDMap_Get_Standalone(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\n\t// Call DMap.Put on S1\n\tdm, err := s.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\tfor i := 0; i < 10; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), nil)\n\t\trequire.NoError(t, err)\n\t}\n\n\tfor i := 0; i < 10; i++ {\n\t\tgr, err := dm.Get(ctx, testutil.ToKey(i))\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, testutil.ToVal(i), gr.Value())\n\t}\n}\n\nfunc TestDMap_Get_Cluster(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts1 := cluster.AddMember(nil).(*Service)\n\ts2 := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\t// Call DMap.Put on S1\n\tdm1, err := s1.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\tfor i := 0; i < 10; i++ {\n\t\terr = dm1.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), nil)\n\t\trequire.NoError(t, err)\n\n\t}\n\n\t// Call DMap.Get on S2\n\tdm2, err := 
s2.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\tfor i := 0; i < 10; i++ {\n\t\tres, err := dm2.Get(ctx, testutil.ToKey(i))\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, testutil.ToVal(i), res.Value())\n\t}\n}\n\nfunc TestDMap_Get_Lookup(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts1 := cluster.AddMember(nil).(*Service)\n\tcluster.AddMember(nil)\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\n\t// Call DMap.Put on S1\n\tdm1, err := s1.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < 10; i++ {\n\t\terr = dm1.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), nil)\n\t\trequire.NoError(t, err)\n\t}\n\n\ts3 := cluster.AddMember(nil).(*Service)\n\t// Call DMap.Get on S3\n\tdm3, err := s3.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < 10; i++ {\n\t\tgr, err := dm3.Get(ctx, testutil.ToKey(i))\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, testutil.ToVal(i), gr.Value())\n\t}\n}\n\nfunc TestDMap_Get_NilValue(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\t// Call DMap.Put on S1\n\tdm, err := s.NewDMap(\"mydmap\")\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\terr = dm.Put(ctx, \"foobar\", nil, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\tgr, err := dm.Get(ctx, \"foobar\")\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\trequire.Equal(t, []byte{}, gr.Value())\n\n\t_, err = dm.Delete(ctx, \"foobar\")\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\t_, err = dm.Get(ctx, \"foobar\")\n\tif err != ErrKeyNotFound {\n\t\tt.Fatalf(\"Expected ErrKeyNotFound. 
Got: %v\", err)\n\t}\n}\n\nfunc TestDMap_Get_NilValue_Cluster(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts1 := cluster.AddMember(nil).(*Service)\n\ts2 := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\n\t// Call DMap.Put on S1\n\tdm, err := s1.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\terr = dm.Put(ctx, \"foobar\", nil, nil)\n\trequire.NoError(t, err)\n\n\tdm2, err := s2.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tgr, err := dm2.Get(ctx, \"foobar\")\n\trequire.NoError(t, err)\n\trequire.Equal(t, []byte{}, gr.Value())\n\n\t_, err = dm2.Delete(ctx, \"foobar\")\n\trequire.NoError(t, err)\n\n\t_, err = dm2.Get(ctx, \"foobar\")\n\trequire.ErrorIs(t, err, ErrKeyNotFound)\n}\n\nfunc TestDMap_Put_ReadQuorum(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\t// Create DMap services with custom configuration\n\tc := testutil.NewConfig()\n\tc.ReplicaCount = 2\n\tc.ReadQuorum = 2\n\te := testcluster.NewEnvironment(c)\n\ts := cluster.AddMember(e).(*Service)\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\tdm, err := s.NewDMap(\"mydmap\")\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\t_, err = dm.Get(ctx, testutil.ToKey(1))\n\tif err != ErrReadQuorum {\n\t\tt.Fatalf(\"Expected ErrReadQuorum. 
Got: %v\", err)\n\t}\n}\n\nfunc TestDMap_Get_ReadRepair(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\tc1 := testutil.NewConfig()\n\tc1.ReadRepair = true\n\tc1.ReplicaCount = 2\n\te1 := testcluster.NewEnvironment(c1)\n\ts1 := cluster.AddMember(e1).(*Service)\n\n\tc2 := testutil.NewConfig()\n\tc2.ReadRepair = true\n\tc2.ReplicaCount = 2\n\te2 := testcluster.NewEnvironment(c2)\n\ts2 := cluster.AddMember(e2).(*Service)\n\n\tdefer cluster.Shutdown()\n\n\t// Call DMap.Put on S1\n\tdm1, err := s1.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tfor i := 0; i < 10; i++ {\n\t\terr = dm1.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), nil)\n\t\trequire.NoError(t, err)\n\t}\n\n\terr = s2.Shutdown(context.Background())\n\trequire.NoError(t, err)\n\n\trt := e2.Get(\"routingtable\").(*routingtable.RoutingTable)\n\terr = rt.Shutdown(context.Background())\n\trequire.NoError(t, err)\n\n\tc3 := testutil.NewConfig()\n\tc3.ReadRepair = true\n\tc3.ReplicaCount = 2\n\te3 := testcluster.NewEnvironment(c3)\n\ts3 := cluster.AddMember(e3).(*Service)\n\n\t// Call DMap.Get on S2\n\tdm2, err := s3.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < 10; i++ {\n\t\tgr, err := dm2.Get(ctx, testutil.ToKey(i))\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, testutil.ToVal(i), gr.Value())\n\t}\n}\n"
  },
  {
    "path": "internal/dmap/handlers.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"github.com/olric-data/olric/internal/protocol\"\n)\n\nfunc (s *Service) RegisterHandlers() {\n\ts.server.ServeMux().HandleFunc(protocol.DMap.Put, s.putCommandHandler)\n\ts.server.ServeMux().HandleFunc(protocol.DMap.Get, s.getCommandHandler)\n\ts.server.ServeMux().HandleFunc(protocol.DMap.Del, s.delCommandHandler)\n\ts.server.ServeMux().HandleFunc(protocol.DMap.DelEntry, s.delEntryCommandHandler)\n\ts.server.ServeMux().HandleFunc(protocol.DMap.GetEntry, s.getEntryCommandHandler)\n\ts.server.ServeMux().HandleFunc(protocol.DMap.PutEntry, s.putEntryCommandHandler)\n\ts.server.ServeMux().HandleFunc(protocol.DMap.Expire, s.expireCommandHandler)\n\ts.server.ServeMux().HandleFunc(protocol.DMap.PExpire, s.pexpireCommandHandler)\n\ts.server.ServeMux().HandleFunc(protocol.DMap.Destroy, s.destroyCommandHandler)\n\ts.server.ServeMux().HandleFunc(protocol.DMap.Scan, s.scanCommandHandler)\n\ts.server.ServeMux().HandleFunc(protocol.DMap.Incr, s.incrCommandHandler)\n\ts.server.ServeMux().HandleFunc(protocol.DMap.Decr, s.decrCommandHandler)\n\ts.server.ServeMux().HandleFunc(protocol.DMap.GetPut, s.getPutCommandHandler)\n\ts.server.ServeMux().HandleFunc(protocol.DMap.IncrByFloat, s.incrByFloatCommandHandler)\n\ts.server.ServeMux().HandleFunc(protocol.DMap.Lock, 
s.lockCommandHandler)\n\ts.server.ServeMux().HandleFunc(protocol.DMap.Unlock, s.unlockCommandHandler)\n\ts.server.ServeMux().HandleFunc(protocol.DMap.LockLease, s.lockLeaseCommandHandler)\n\ts.server.ServeMux().HandleFunc(protocol.DMap.PLockLease, s.plockLeaseCommandHandler)\n\ts.server.ServeMux().HandleFunc(protocol.Internal.MoveFragment, s.moveFragmentCommandHandler)\n}\n"
  },
  {
    "path": "internal/dmap/janitor.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n)\n\nfunc wipeOutFragment(part *partitions.Partition, name string, f *fragment) error {\n\t// Stop background services if there is any.\n\terr := f.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Destroy data on-disk or in-memory.\n\terr = f.Destroy()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Delete the fragment from partition.\n\tpart.Map().Delete(name)\n\treturn nil\n}\n\nfunc (s *Service) janitor(part *partitions.Partition) {\n\tpart.Map().Range(func(name, tmp interface{}) bool {\n\t\tif !strings.HasPrefix(name.(string), \"dmap.\") {\n\t\t\t// This fragment belongs to a different data structure.\n\t\t\treturn true\n\t\t}\n\n\t\tf := tmp.(*fragment)\n\t\tf.Lock()\n\t\tdefer f.Unlock()\n\n\t\tif f.storage.Stats().Length != 0 {\n\t\t\t// It's not empty. 
Continue scanning.\n\t\t\treturn true\n\t\t}\n\n\t\terr := wipeOutFragment(part, name.(string), f)\n\t\tif err != nil {\n\t\t\ts.log.V(3).Printf(\"[ERROR] Failed to delete empty DMap fragment (kind: %s): %s on PartID: %d\",\n\t\t\t\tpart.Kind(), name, part.ID())\n\t\t\t// continue scanning\n\t\t\treturn true\n\t\t}\n\n\t\ts.log.V(4).Printf(\"[INFO] Empty DMap fragment (kind: %s) has been deleted: %s on PartID: %d\",\n\t\t\tpart.Kind(), name, part.ID())\n\t\treturn true\n\t})\n}\n\nfunc (s *Service) deleteEmptyFragments() {\n\tfor partID := uint64(0); partID < s.config.PartitionCount; partID++ {\n\t\t// Clean stale DMap fragments on partition table\n\t\tpart := s.primary.PartitionByID(partID)\n\t\ts.janitor(part)\n\n\t\t// Clean stale DMap fragments on backup partition table\n\t\tbackup := s.backup.PartitionByID(partID)\n\t\ts.janitor(backup)\n\t}\n}\n\nfunc (s *Service) janitorWorker() {\n\tdefer s.wg.Done()\n\ttimer := time.NewTimer(s.config.DMaps.CheckEmptyFragmentsInterval)\n\tdefer timer.Stop()\n\n\tfor {\n\t\ttimer.Reset(s.config.DMaps.CheckEmptyFragmentsInterval)\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\ts.deleteEmptyFragments()\n\t\tcase <-s.ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "internal/dmap/lock.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/rand\"\n\t\"encoding/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n)\n\nvar (\n\t// ErrLockNotAcquired is returned when the requested lock could not be acquired\n\tErrLockNotAcquired = errors.New(\"lock not acquired\")\n\n\t// ErrNoSuchLock is returned when the requested lock does not exist\n\tErrNoSuchLock = errors.New(\"no such lock\")\n)\n\n// unlockKey tries to unlock the lock by verifying the lock with token.\nfunc (dm *DMap) unlockKey(ctx context.Context, key string, token []byte) error {\n\tlkey := dm.name + key\n\t// Only one unlockKey should work for a given key.\n\tdm.s.locker.Lock(lkey)\n\tdefer func() {\n\t\terr := dm.s.locker.Unlock(lkey)\n\t\tif err != nil {\n\t\t\tdm.s.log.V(3).Printf(\"[ERROR] Failed to release the fine grained lock for key: %s on DMap: %s: %v\", key, dm.name, err)\n\t\t}\n\t}()\n\n\t// get the key to check its value\n\tentry, err := dm.Get(ctx, key)\n\tif errors.Is(err, ErrKeyNotFound) {\n\t\treturn ErrNoSuchLock\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// the lock is released by the node(timeout) or the user\n\tif !bytes.Equal(entry.Value(), token) {\n\t\treturn ErrNoSuchLock\n\t}\n\n\t// release it.\n\t_, err = 
dm.deleteKeys(ctx, key)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unlock failed because of delete: %w\", err)\n\t}\n\treturn nil\n}\n\n// Unlock takes key and token and tries to unlock the key.\n// It redirects the request to the partition owner, if required.\nfunc (dm *DMap) Unlock(ctx context.Context, key string, token []byte) error {\n\thkey := partitions.HKey(dm.name, key)\n\tmember := dm.s.primary.PartitionByHKey(hkey).Owner()\n\tif member.CompareByName(dm.s.rt.This()) {\n\t\treturn dm.unlockKey(ctx, key, token)\n\t}\n\n\tcmd := protocol.NewUnlock(dm.name, key, hex.EncodeToString(token)).Command(dm.s.ctx)\n\trc := dm.s.client.Get(member.String())\n\terr := rc.Process(ctx, cmd)\n\tif err != nil {\n\t\treturn protocol.ConvertError(err)\n\t}\n\treturn protocol.ConvertError(cmd.Err())\n}\n\n// tryLock takes a deadline and env and sets a key-value pair by using\n// Put with NX and PX commands. It tries to acquire the lock 100 times per second\n// if the lock is already acquired. It returns ErrLockNotAcquired if the deadline exceeds.\nfunc (dm *DMap) tryLock(e *env, deadline time.Duration) error {\n\terr := dm.put(e)\n\tif err == nil {\n\t\treturn nil\n\t}\n\t// If it returns ErrKeyFound, the lock is already acquired.\n\tif !errors.Is(err, ErrKeyFound) {\n\t\t// something went wrong\n\t\treturn err\n\t}\n\n\tctx, cancel := context.WithTimeout(e.ctx, deadline)\n\tdefer cancel()\n\n\ttimer := time.NewTimer(10 * time.Millisecond)\n\tdefer timer.Stop()\n\n\t// Try to acquire lock.\nLOOP:\n\tfor {\n\t\ttimer.Reset(10 * time.Millisecond)\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\terr = dm.put(e)\n\t\t\tif errors.Is(err, ErrKeyFound) {\n\t\t\t\t// not released by the other process/goroutine. try again.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\t// something went wrong.\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// Acquired! Quit without error.\n\t\t\tbreak LOOP\n\t\tcase <-ctx.Done():\n\t\t\t// Deadline exceeded. 
Quit with an error.\n\t\t\treturn ErrLockNotAcquired\n\t\tcase <-dm.s.ctx.Done():\n\t\t\treturn fmt.Errorf(\"server is gone\")\n\t\t}\n\t}\n\treturn nil\n}\n\n// Lock prepares a token and env, then calls tryLock\nfunc (dm *DMap) Lock(ctx context.Context, key string, timeout, deadline time.Duration) ([]byte, error) {\n\ttoken := make([]byte, 16)\n\t_, err := rand.Read(token)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar pc PutConfig\n\tpc.HasNX = true\n\tif timeout.Milliseconds() != 0 {\n\t\tpc.HasPX = true\n\t\tpc.PX = timeout\n\t}\n\n\te := newEnv(ctx)\n\te.putConfig = &pc\n\te.dmap = dm.name\n\te.key = key\n\te.value = token\n\terr = dm.tryLock(e, deadline)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn token, nil\n}\n\n// leaseKey tries to update the expiry of the key by verifying token.\nfunc (dm *DMap) leaseKey(ctx context.Context, key string, token []byte, timeout time.Duration) error {\n\tlkey := dm.name + key\n\t// Only one unlockKey should work for a given key.\n\tdm.s.locker.Lock(lkey)\n\tdefer func() {\n\t\terr := dm.s.locker.Unlock(lkey)\n\t\tif err != nil {\n\t\t\tdm.s.log.V(3).Printf(\"[ERROR] Failed to release the fine grained lock for key: %s on DMap: %s: %v\", key, dm.name, err)\n\t\t}\n\t}()\n\n\t// get the key to check its value\n\te, err := dm.Get(ctx, key)\n\tif errors.Is(err, ErrKeyNotFound) {\n\t\treturn ErrNoSuchLock\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// the lock is released by the node(timeout) or the user\n\tif !bytes.Equal(e.Value(), token) {\n\t\treturn ErrNoSuchLock\n\t}\n\n\tttl := e.TTL()\n\tif ttl > 0 && (time.Now().UnixNano()/1000000) >= ttl {\n\t\t// already expired\n\t\treturn ErrNoSuchLock\n\t}\n\n\t// update\n\terr = dm.Expire(ctx, key, timeout)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"lease failed: %w\", err)\n\t}\n\treturn nil\n}\n\n// Lease takes key and token and tries to update the expiry with duration.\n// It redirects the request to the partition owner, if required.\nfunc (dm *DMap) 
Lease(ctx context.Context, key string, token []byte, timeout time.Duration) error {\n\thkey := partitions.HKey(dm.name, key)\n\tmember := dm.s.primary.PartitionByHKey(hkey).Owner()\n\tif member.CompareByName(dm.s.rt.This()) {\n\t\treturn dm.leaseKey(ctx, key, token, timeout)\n\t}\n\n\tcmd := protocol.NewLockLease(dm.name, key, hex.EncodeToString(token), timeout.Seconds()).Command(dm.s.ctx)\n\trc := dm.s.client.Get(member.String())\n\terr := rc.Process(ctx, cmd)\n\tif err != nil {\n\t\treturn protocol.ConvertError(err)\n\t}\n\treturn protocol.ConvertError(cmd.Err())\n}\n"
  },
  {
    "path": "internal/dmap/lock_handlers.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"encoding/hex\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/tidwall/redcon\"\n)\n\nfunc (s *Service) unlockCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tunlockCmd, err := protocol.ParseUnlockCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tdm, err := s.getOrCreateDMap(unlockCmd.DMap)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\ttoken, err := hex.DecodeString(unlockCmd.Token)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\terr = dm.Unlock(s.ctx, unlockCmd.Key, token)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tconn.WriteString(protocol.StatusOK)\n}\n\nfunc (s *Service) lockCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tlockCmd, err := protocol.ParseLockCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tdm, err := s.getOrCreateDMap(lockCmd.DMap)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tvar timeout = nilTimeout\n\tswitch {\n\tcase lockCmd.EX != 0:\n\t\ttimeout = time.Duration(lockCmd.EX * float64(time.Second))\n\tcase lockCmd.PX != 0:\n\t\ttimeout = time.Duration(lockCmd.PX * int64(time.Millisecond))\n\t}\n\n\tvar deadline = 
time.Duration(lockCmd.Deadline * float64(time.Second))\n\ttoken, err := dm.Lock(s.ctx, lockCmd.Key, timeout, deadline)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tconn.WriteString(hex.EncodeToString(token))\n}\n\nfunc (s *Service) lockLeaseCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tlockLeaseCmd, err := protocol.ParseLockLeaseCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tdm, err := s.getOrCreateDMap(lockLeaseCmd.DMap)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\ttimeout := time.Duration(lockLeaseCmd.Timeout * float64(time.Second))\n\ttoken, err := hex.DecodeString(lockLeaseCmd.Token)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\terr = dm.Lease(s.ctx, lockLeaseCmd.Key, token, timeout)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\tconn.WriteString(protocol.StatusOK)\n}\n\nfunc (s *Service) plockLeaseCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tplockLeaseCmd, err := protocol.ParsePLockLeaseCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tdm, err := s.getOrCreateDMap(plockLeaseCmd.DMap)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\ttimeout := time.Duration(plockLeaseCmd.Timeout * int64(time.Millisecond))\n\ttoken, err := hex.DecodeString(plockLeaseCmd.Token)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\terr = dm.Lease(s.ctx, plockLeaseCmd.Key, token, timeout)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\tconn.WriteString(protocol.StatusOK)\n}\n"
  },
  {
    "path": "internal/dmap/lock_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"context\"\n\t\"encoding/hex\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/olric-data/olric/internal/testcluster\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestDMap_Lock_With_Timeout_Standalone(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tkey := \"lock.test.foo\"\n\tdm, err := s.NewDMap(\"lock.test\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\ttoken, err := dm.Lock(ctx, key, time.Second, time.Second)\n\trequire.NoError(t, err)\n\n\terr = dm.Unlock(ctx, key, token)\n\trequire.NoError(t, err)\n}\n\nfunc TestDMap_Unlock_After_Timeout_Standalone(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tkey := \"lock.test.foo\"\n\tdm, err := s.NewDMap(\"lock.test\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\ttoken, err := dm.Lock(context.Background(), key, time.Millisecond, time.Second)\n\trequire.NoError(t, err)\n\n\t<-time.After(10 * time.Millisecond)\n\n\terr = dm.Unlock(ctx, key, token)\n\trequire.ErrorIs(t, err, ErrNoSuchLock)\n}\n\nfunc TestDMap_Lock_With_Timeout_ErrLockNotAcquired_Standalone(t *testing.T) {\n\tcluster := 
testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tkey := \"lock.test.foo\"\n\tdm, err := s.NewDMap(\"lock.test\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\t_, err = dm.Lock(ctx, key, time.Second, time.Second)\n\trequire.NoError(t, err)\n\n\t_, err = dm.Lock(context.Background(), key, time.Second, time.Millisecond)\n\trequire.ErrorIs(t, err, ErrLockNotAcquired)\n}\n\nfunc TestDMap_LockLease_Standalone(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tkey := \"lock.test.foo\"\n\tdm, err := s.NewDMap(\"lock.test\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\ttoken, err := dm.Lock(context.Background(), key, time.Second, time.Second)\n\trequire.NoError(t, err)\n\n\terr = dm.Lease(ctx, key, token, 2*time.Second)\n\trequire.NoError(t, err)\n\n\te, err := dm.Get(ctx, key)\n\trequire.NoError(t, err)\n\n\tif e.TTL()-(time.Now().UnixNano()/1000000) <= 1900 {\n\t\tt.Fatalf(\"Expected >=1900. 
Got: %v\", e.TTL()-(time.Now().UnixNano()/1000000))\n\t}\n\n\t<-time.After(3 * time.Second)\n\n\terr = dm.Lease(ctx, key, token, 3*time.Second)\n\trequire.ErrorIs(t, err, ErrNoSuchLock)\n}\n\nfunc TestDMap_Lock_Standalone(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\tkey := \"lock.test.foo\"\n\tdm, err := s.NewDMap(\"lock.test\")\n\trequire.NoError(t, err)\n\n\ttoken, err := dm.Lock(ctx, key, nilTimeout, time.Second)\n\trequire.NoError(t, err)\n\n\terr = dm.Unlock(ctx, key, token)\n\trequire.NoError(t, err)\n}\n\nfunc TestDMap_Lock_ErrLockNotAcquired_Standalone(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\tkey := \"lock.test.foo\"\n\tdm, err := s.NewDMap(\"lock.test\")\n\trequire.NoError(t, err)\n\n\t_, err = dm.Lock(ctx, key, nilTimeout, time.Second)\n\trequire.NoError(t, err)\n\n\t_, err = dm.Lock(ctx, key, nilTimeout, time.Millisecond)\n\trequire.ErrorIs(t, err, ErrLockNotAcquired)\n}\n\nfunc TestDMap_LockWithTimeout_Cluster(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts1 := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tdm, err := s1.NewDMap(\"lock.test\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\ttokens := make(map[string][]byte)\n\tfor i := 0; i < 100; i++ {\n\t\tkey := \"lock.test.foo.\" + strconv.Itoa(i)\n\t\ttoken, err := dm.Lock(ctx, key, time.Hour, time.Second)\n\t\trequire.NoError(t, err)\n\t\ttokens[key] = token\n\t}\n\n\tcluster.AddMember(nil)\n\tfor key, token := range tokens {\n\t\terr = dm.Unlock(ctx, key, token)\n\t\trequire.NoError(t, err)\n\t}\n}\n\nfunc TestDMap_LockLease_Cluster(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts1 := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tdm, err := 
s1.NewDMap(\"lock.test\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\ttokens := make(map[string][]byte)\n\tfor i := 0; i < 100; i++ {\n\t\tkey := \"lock.test.foo.\" + strconv.Itoa(i)\n\t\ttoken, err := dm.Lock(ctx, key, 5*time.Second, 10*time.Millisecond)\n\t\trequire.NoError(t, err)\n\t\ttokens[key] = token\n\t}\n\n\tcluster.AddMember(nil)\n\tfor key, token := range tokens {\n\t\terr = dm.Lease(ctx, key, token, 10*time.Second)\n\t\trequire.NoError(t, err)\n\t}\n}\n\nfunc TestDMap_Lock_Cluster(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tdm, err := s.NewDMap(\"lock.test\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\ttokens := make(map[string][]byte)\n\tfor i := 0; i < 100; i++ {\n\t\tkey := \"lock.test.foo.\" + strconv.Itoa(i)\n\t\ttoken, err := dm.Lock(ctx, key, nilTimeout, time.Second)\n\t\trequire.NoError(t, err)\n\t\ttokens[key] = token\n\t}\n\n\tcluster.AddMember(nil)\n\tfor key, token := range tokens {\n\t\terr = dm.Unlock(ctx, key, token)\n\t\trequire.NoError(t, err)\n\t}\n}\n\nfunc TestDMap_LockWithTimeout_ErrLockNotAcquired_Cluster(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tdm, err := s.NewDMap(\"lock.test\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tfor i := 0; i < 100; i++ {\n\t\tkey := \"lock.test.foo.\" + strconv.Itoa(i)\n\t\t_, err := dm.Lock(ctx, key, time.Second, time.Second)\n\t\trequire.NoError(t, err)\n\t}\n\n\tcluster.AddMember(nil)\n\n\tfor i := 0; i < 100; i++ {\n\t\tkey := \"lock.test.foo.\" + strconv.Itoa(i)\n\t\t_, err = dm.Lock(ctx, key, time.Second, time.Millisecond)\n\t\trequire.ErrorIs(t, err, ErrLockNotAcquired)\n\t}\n}\n\nfunc TestDMap_Lock_After_Lock_With_Timeout_Cluster(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer 
cluster.Shutdown()\n\n\tdm, err := s.NewDMap(\"lock.test\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tfor i := 0; i < 100; i++ {\n\t\tkey := \"lock.test.foo.\" + strconv.Itoa(i)\n\t\t_, err = dm.Lock(ctx, key, time.Millisecond, time.Second)\n\t\trequire.NoError(t, err)\n\t}\n\n\tcluster.AddMember(nil)\n\tfor i := 0; i < 100; i++ {\n\t\tkey := \"lock.test.foo.\" + strconv.Itoa(i)\n\t\t_, err = dm.Lock(ctx, key, nilTimeout, time.Second)\n\t\trequire.NoError(t, err)\n\t}\n}\n\nfunc TestDMap_tryLock(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tkey := \"lock.test.foo\"\n\tdm, err := s.NewDMap(\"lock.test\")\n\trequire.NoError(t, err)\n\n\t_, err = dm.Lock(context.Background(), key, time.Second, time.Second)\n\trequire.NoError(t, err)\n\n\tvar i int\n\tvar acquired bool\n\tfor i <= 10 {\n\t\ti++\n\t\t_, err := dm.Lock(context.Background(), key, nilTimeout, 100*time.Millisecond)\n\t\tif err == ErrLockNotAcquired {\n\t\t\t// already acquired\n\t\t\tcontinue\n\t\t}\n\t\trequire.NoError(t, err)\n\t\t// Acquired\n\t\tacquired = true\n\t\tbreak\n\t}\n\tif !acquired {\n\t\tt.Fatal(\"Failed to acquire lock\")\n\t}\n}\n\nfunc TestDMap_lockCommandHandler(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tcmd := protocol.NewLock(\"lock.test\", \"lock.test.foo\", 1).Command(s.ctx)\n\trc := s.client.Get(s.rt.This().String())\n\terr := rc.Process(s.ctx, cmd)\n\trequire.NoError(t, err)\n\n\ttoken, err := cmd.Bytes()\n\trequire.NoError(t, err)\n\n\tcmdUnlock := protocol.NewUnlock(\"lock.test\", \"lock.test.foo\", string(token)).Command(s.ctx)\n\terr = rc.Process(s.ctx, cmdUnlock)\n\trequire.NoError(t, err)\n\n\terr = cmdUnlock.Err()\n\trequire.NoError(t, err)\n}\n\nfunc TestDMap_lockCommandHandler_EX(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := 
cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tcmd := protocol.NewLock(\"lock.test\", \"lock.test.foo\", 1).SetEX(1).Command(s.ctx)\n\trc := s.client.Get(s.rt.This().String())\n\terr := rc.Process(s.ctx, cmd)\n\trequire.NoError(t, err)\n\n\ttoken, err := cmd.Bytes()\n\trequire.NoError(t, err)\n\n\t<-time.After(2 * time.Second)\n\n\tcmdUnlock := protocol.NewUnlock(\"lock.test\", \"lock.test.foo\", string(token)).Command(s.ctx)\n\terr = rc.Process(s.ctx, cmdUnlock)\n\terr = protocol.ConvertError(err)\n\trequire.ErrorIs(t, err, ErrNoSuchLock)\n}\n\nfunc TestDMap_lockCommandHandler_PX(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tcmd := protocol.NewLock(\"lock.test\", \"lock.test.foo\", 1).\n\t\tSetPX((10 * time.Millisecond).Milliseconds()).Command(s.ctx)\n\trc := s.client.Get(s.rt.This().String())\n\terr := rc.Process(s.ctx, cmd)\n\trequire.NoError(t, err)\n\n\ttoken, err := cmd.Bytes()\n\trequire.NoError(t, err)\n\n\t<-time.After(20 * time.Millisecond)\n\n\tcmdUnlock := protocol.NewUnlock(\"lock.test\", \"lock.test.foo\", string(token)).Command(s.ctx)\n\terr = rc.Process(s.ctx, cmdUnlock)\n\terr = protocol.ConvertError(err)\n\trequire.ErrorIs(t, err, ErrNoSuchLock)\n}\n\nfunc TestDMap_lockLeaseCommandHandler(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tkey := \"lock.test.foo\"\n\tdm, err := s.NewDMap(\"lock.test\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\n\ttoken, err := dm.Lock(ctx, key, time.Second, time.Second)\n\trequire.NoError(t, err)\n\n\t// Update the timeout\n\tetoken := hex.EncodeToString(token)\n\tcmd := protocol.NewLockLease(\"lock.test\", key, etoken, 10).Command(s.ctx)\n\trc := s.client.Get(s.rt.This().String())\n\terr = rc.Process(s.ctx, cmd)\n\trequire.NoError(t, err)\n\n\t<-time.After(2 * time.Second)\n\n\terr = dm.Unlock(ctx, key, 
token)\n\trequire.NoError(t, err)\n}\n\nfunc TestDMap_plockLeaseCommandHandler(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tkey := \"lock.test.foo\"\n\tdm, err := s.NewDMap(\"lock.test\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\n\ttoken, err := dm.Lock(ctx, key, 250*time.Millisecond, time.Second)\n\trequire.NoError(t, err)\n\n\t// Update the timeout\n\tetoken := hex.EncodeToString(token)\n\tcmd := protocol.NewPLockLease(\"lock.test\", key, etoken, 2000).Command(s.ctx)\n\trc := s.client.Get(s.rt.This().String())\n\terr = rc.Process(s.ctx, cmd)\n\trequire.NoError(t, err)\n\n\t<-time.After(500 * time.Millisecond)\n\n\terr = dm.Unlock(ctx, key, token)\n\trequire.NoError(t, err)\n}\n"
  },
  {
    "path": "internal/dmap/put.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/config\"\n\t\"github.com/olric-data/olric/internal/bufpool\"\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/discovery\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/olric-data/olric/internal/resp\"\n\t\"github.com/olric-data/olric/internal/stats\"\n\t\"github.com/olric-data/olric/pkg/storage\"\n\t\"github.com/redis/go-redis/v9\"\n)\n\nvar pool = bufpool.New()\n\n// EntriesTotal is the total number of entries(including replicas)\n// stored during the life of this instance.\nvar EntriesTotal = stats.NewInt64Counter()\n\nvar (\n\tErrKeyFound      = errors.New(\"key found\")\n\tErrWriteQuorum   = errors.New(\"write quorum cannot be reached\")\n\tErrKeyTooLarge   = errors.New(\"key too large\")\n\tErrEntryTooLarge = errors.New(\"entry too large for the configured table size\")\n)\n\nfunc prepareTTL(e *env) int64 {\n\tvar ttl int64\n\tswitch {\n\tcase e.putConfig.HasEX:\n\t\tttl = (e.putConfig.EX.Nanoseconds() + time.Now().UnixNano()) / 1000000\n\tcase e.putConfig.HasPX:\n\t\tttl = (e.putConfig.PX.Nanoseconds() + time.Now().UnixNano()) / 1000000\n\tcase e.putConfig.HasEXAT:\n\t\tttl = e.putConfig.EXAT.Nanoseconds() / 1000000\n\tcase 
e.putConfig.HasPXAT:\n\t\tttl = e.putConfig.PXAT.Nanoseconds() / 1000000\n\tdefault:\n\t\tns := e.timeout.Nanoseconds()\n\t\tif ns != 0 {\n\t\t\tttl = (ns + time.Now().UnixNano()) / 1000000\n\t\t}\n\t}\n\treturn ttl\n}\n\n// putOnFragment calls underlying storage engine's Put method to store the key/value pair. It's not thread-safe.\nfunc (dm *DMap) putEntryOnFragment(e *env, nt storage.Entry) error {\n\tif e.putConfig.OnlyUpdateTTL {\n\t\terr := e.fragment.storage.UpdateTTL(e.hkey, nt)\n\t\tif err != nil {\n\t\t\tif errors.Is(err, storage.ErrKeyNotFound) {\n\t\t\t\terr = ErrKeyNotFound\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\terr := e.fragment.storage.Put(e.hkey, nt)\n\tif errors.Is(err, storage.ErrKeyTooLarge) {\n\t\terr = ErrKeyTooLarge\n\t}\n\tif errors.Is(err, storage.ErrEntryTooLarge) {\n\t\terr = ErrEntryTooLarge\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// total number of entries stored during the life of this instance.\n\tEntriesTotal.Increase(1)\n\n\treturn nil\n}\n\nfunc (dm *DMap) prepareEntry(e *env) storage.Entry {\n\tnt := e.fragment.storage.NewEntry()\n\tnt.SetKey(e.key)\n\tnt.SetValue(e.value)\n\tnt.SetTTL(prepareTTL(e))\n\tnt.SetTimestamp(e.timestamp)\n\treturn nt\n}\n\nfunc (dm *DMap) putOnReplicaFragment(e *env) error {\n\tpart := dm.getPartitionByHKey(e.hkey, partitions.BACKUP)\n\tf, err := dm.loadOrCreateFragment(part)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te.fragment = f\n\tf.Lock()\n\tdefer f.Unlock()\n\n\terr = f.storage.PutRaw(e.hkey, e.value)\n\tif errors.Is(err, storage.ErrKeyTooLarge) {\n\t\terr = ErrKeyTooLarge\n\t}\n\tif errors.Is(err, storage.ErrEntryTooLarge) {\n\t\terr = ErrEntryTooLarge\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// total number of entries stored during the life of this instance.\n\tEntriesTotal.Increase(1)\n\n\treturn nil\n}\n\nfunc (dm *DMap) asyncPutOnBackup(e *env, data []byte, owner discovery.Member) {\n\tdefer dm.s.wg.Done()\n\n\trc := 
dm.s.client.Get(owner.String())\n\tcmd := protocol.NewPutEntry(e.dmap, e.key, data).Command(dm.s.ctx)\n\terr := rc.Process(dm.s.ctx, cmd)\n\tif err != nil {\n\t\tif dm.s.log.V(3).Ok() {\n\t\t\tdm.s.log.V(3).Printf(\"[ERROR] Failed to create replica in async mode: %v\", err)\n\t\t}\n\t\treturn\n\t}\n\terr = cmd.Err()\n\tif err != nil {\n\t\tif dm.s.log.V(3).Ok() {\n\t\t\tdm.s.log.V(3).Printf(\"[ERROR] Failed to create replica in async mode: %v\", err)\n\t\t}\n\t}\n}\n\nfunc (dm *DMap) asyncPutOnCluster(e *env, nt storage.Entry) error {\n\terr := dm.putEntryOnFragment(e, nt)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tencodedEntry := nt.Encode()\n\t// Fire and forget mode.\n\towners := dm.s.backup.PartitionOwnersByHKey(e.hkey)\n\tfor _, owner := range owners {\n\t\tif !dm.s.isAlive() {\n\t\t\treturn ErrServerGone\n\t\t}\n\n\t\tdm.s.wg.Add(1)\n\t\tgo dm.asyncPutOnBackup(e, encodedEntry, owner)\n\t}\n\n\treturn nil\n}\n\nfunc (dm *DMap) syncPutOnCluster(e *env, nt storage.Entry) error {\n\t// Quorum based replication.\n\tvar successful int\n\n\tencodedEntry := nt.Encode()\n\n\towners := dm.s.backup.PartitionOwnersByHKey(e.hkey)\n\tfor _, owner := range owners {\n\t\trc := dm.s.client.Get(owner.String())\n\t\tcmd := protocol.NewPutEntry(dm.name, e.key, encodedEntry).Command(dm.s.ctx)\n\t\terr := rc.Process(dm.s.ctx, cmd)\n\t\tif err != nil {\n\t\t\treturn protocol.ConvertError(err)\n\t\t}\n\t\terr = protocol.ConvertError(cmd.Err())\n\t\tif err != nil {\n\t\t\tif dm.s.log.V(3).Ok() {\n\t\t\t\tdm.s.log.V(3).Printf(\"[ERROR] Failed to call put command on %s for DMap: %s: %v\", owner, e.dmap, err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tsuccessful++\n\t}\n\terr := dm.putEntryOnFragment(e, nt)\n\tif err != nil {\n\t\tif dm.s.log.V(3).Ok() {\n\t\t\tdm.s.log.V(3).Printf(\"[ERROR] Failed to call put command on %s for DMap: %s: %v\", dm.s.rt.This(), e.dmap, err)\n\t\t}\n\t} else {\n\t\tsuccessful++\n\t}\n\tif successful >= dm.s.config.WriteQuorum {\n\t\treturn nil\n\t}\n\treturn 
ErrWriteQuorum\n}\n\nfunc (dm *DMap) setLRUEvictionStats(e *env) error {\n\t// Try to make room for the new item, if it's required.\n\t// MaxKeys and MaxInuse properties of LRU can be used in the same time.\n\t// But I think that it's good to use only one of time in a production system.\n\t// Because it should be easy to understand and debug.\n\tst := e.fragment.storage.Stats()\n\n\t// This works for every request if you enabled LRU.\n\t// But loading a number from memory should be very cheap.\n\t// ownedPartitionCount changes in the case of node join or leave.\n\townedPartitionCount := dm.s.rt.OwnedPartitionCount()\n\tif ownedPartitionCount == 0 {\n\t\t// Routing table is an eventually consistent data structure. In order to prevent a panic in prod,\n\t\t// check the owned partition count before doing math.\n\t\treturn nil\n\t}\n\n\tif dm.config.maxKeys > 0 {\n\t\t// MaxKeys controls maximum key count owned by this node.\n\t\t// We need ownedPartitionCount property because every partition\n\t\t// manages itself independently. So if you set MaxKeys=70 and\n\t\t// your partition count is 7, every partition 10 keys at maximum.\n\t\tif st.Length > 0 && st.Length >= dm.config.maxKeys/int(ownedPartitionCount) {\n\t\t\terr := dm.evictKeyWithLRU(e)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif dm.config.maxInuse > 0 {\n\t\t// MaxInuse controls maximum in-use memory of partitions on this node.\n\t\t// We need ownedPartitionCount property because every partition\n\t\t// manages itself independently. 
So if you set MaxInuse=70M(in bytes) and\n\t\t// your partition count is 7, every partition consumes 10M in-use space at maximum.\n\t\t// WARNING: Actual allocated memory can be different.\n\t\tif st.Inuse > 0 && st.Inuse >= dm.config.maxInuse/int(ownedPartitionCount) {\n\t\t\terr := dm.evictKeyWithLRU(e)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (dm *DMap) checkPutConditions(e *env) error {\n\t// Only set the key if it does not already exist.\n\tif e.putConfig.HasNX {\n\t\tttl, err := e.fragment.storage.GetTTL(e.hkey)\n\t\tif err == nil {\n\t\t\tif !isKeyExpired(ttl) {\n\t\t\t\treturn ErrKeyFound\n\t\t\t}\n\t\t}\n\t\tif errors.Is(err, storage.ErrKeyNotFound) {\n\t\t\terr = nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Only set the key if it already exists.\n\tif e.putConfig.HasXX && !e.fragment.storage.Check(e.hkey) {\n\t\tttl, err := e.fragment.storage.GetTTL(e.hkey)\n\t\tif err == nil {\n\t\t\tif isKeyExpired(ttl) {\n\t\t\t\treturn ErrKeyNotFound\n\t\t\t}\n\t\t}\n\t\tif errors.Is(err, storage.ErrKeyNotFound) {\n\t\t\terr = ErrKeyNotFound\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (dm *DMap) putOnCluster(e *env) error {\n\tpart := dm.getPartitionByHKey(e.hkey, partitions.PRIMARY)\n\tf, err := dm.loadOrCreateFragment(part)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te.fragment = f\n\tf.Lock()\n\tdefer f.Unlock()\n\n\tif err = dm.checkPutConditions(e); err != nil {\n\t\treturn err\n\t}\n\n\tif dm.config != nil {\n\t\tif dm.config.ttlDuration.Seconds() != 0 && e.timeout.Seconds() == 0 {\n\t\t\te.timeout = dm.config.ttlDuration\n\t\t}\n\t\tif dm.config.evictionPolicy == config.LRUEviction {\n\t\t\tif err = dm.setLRUEvictionStats(e); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tnt := dm.prepareEntry(e)\n\tif dm.s.config.ReplicaCount > config.MinimumReplicaCount {\n\t\tswitch dm.s.config.ReplicationMode {\n\t\tcase 
config.AsyncReplicationMode:\n\t\t\t// Fire and forget mode. Calls PutBackup command in different goroutines\n\t\t\t// and stores the key/value pair on local storage instance.\n\t\t\treturn dm.asyncPutOnCluster(e, nt)\n\t\tcase config.SyncReplicationMode:\n\t\t\t// Quorum based replication.\n\t\t\treturn dm.syncPutOnCluster(e, nt)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"invalid replication mode: %v\", dm.s.config.ReplicationMode)\n\t\t}\n\t}\n\n\t// single replica\n\treturn dm.putEntryOnFragment(e, nt)\n}\n\nfunc (dm *DMap) writePutCommand(e *env) (*redis.StatusCmd, error) {\n\tcmd := protocol.NewPut(e.dmap, e.key, e.value)\n\tswitch {\n\tcase e.putConfig.HasEX:\n\t\tcmd.SetEX(e.putConfig.EX.Seconds())\n\tcase e.putConfig.HasPX:\n\t\tcmd.SetPX(e.putConfig.PX.Milliseconds())\n\tcase e.putConfig.HasEXAT:\n\t\tcmd.SetEXAT(e.putConfig.EXAT.Seconds())\n\tcase e.putConfig.HasPXAT:\n\t\tcmd.SetPXAT(e.putConfig.PXAT.Milliseconds())\n\t}\n\n\tswitch {\n\tcase e.putConfig.HasNX:\n\t\tcmd.SetNX()\n\tcase e.putConfig.HasXX:\n\t\tcmd.SetXX()\n\t}\n\n\treturn cmd.Command(dm.s.ctx), nil\n}\n\n// put controls every write operation in Olric. 
It redirects the requests to its owner,\n// if the key belongs to another host.\nfunc (dm *DMap) put(e *env) error {\n\te.hkey = partitions.HKey(e.dmap, e.key)\n\tmember := dm.s.primary.PartitionByHKey(e.hkey).Owner()\n\tif member.CompareByName(dm.s.rt.This()) {\n\t\t// We are on the partition owner.\n\t\treturn dm.putOnCluster(e)\n\t}\n\n\t// Redirect to the partition owner.\n\tcmd, err := dm.writePutCommand(e)\n\tif err != nil {\n\t\treturn err\n\t}\n\trc := dm.s.client.Get(member.String())\n\terr = rc.Process(e.ctx, cmd)\n\tif err != nil {\n\t\treturn protocol.ConvertError(err)\n\t}\n\treturn protocol.ConvertError(cmd.Err())\n}\n\ntype PutConfig struct {\n\tHasEX         bool\n\tEX            time.Duration\n\tHasPX         bool\n\tPX            time.Duration\n\tHasEXAT       bool\n\tEXAT          time.Duration\n\tHasPXAT       bool\n\tPXAT          time.Duration\n\tHasNX         bool\n\tHasXX         bool\n\tOnlyUpdateTTL bool\n}\n\n// Put sets the value for the given key. It overwrites any previous value\n// for that key, and it's thread-safe. The key has to be a string. value type\n// is arbitrary. It is safe to modify the contents of the arguments after\n// Put returns but not before.\nfunc (dm *DMap) Put(ctx context.Context, key string, value interface{}, cfg *PutConfig) error {\n\tvalueBuf := pool.Get()\n\tdefer pool.Put(valueBuf)\n\n\tenc := resp.New(valueBuf)\n\terr := enc.Encode(value)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif cfg == nil {\n\t\tcfg = &PutConfig{}\n\t}\n\te := newEnv(ctx)\n\te.putConfig = cfg\n\te.dmap = dm.name\n\te.key = key\n\te.value = make([]byte, valueBuf.Len())\n\tcopy(e.value[:], valueBuf.Bytes())\n\treturn dm.put(e)\n}\n"
  },
  {
    "path": "internal/dmap/put_handlers.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/tidwall/redcon\"\n)\n\nfunc (s *Service) putCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tputCmd, err := protocol.ParsePutCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\tdm, err := s.getOrCreateDMap(putCmd.DMap)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tvar pc PutConfig\n\tswitch {\n\tcase putCmd.NX:\n\t\tpc.HasNX = true\n\tcase putCmd.XX:\n\t\tpc.HasXX = true\n\t}\n\n\tswitch {\n\tcase putCmd.EX != 0:\n\t\tpc.HasEX = true\n\t\tpc.EX = time.Duration(putCmd.EX * float64(time.Second))\n\tcase putCmd.PX != 0:\n\t\tpc.HasPX = true\n\t\tpc.PX = time.Duration(putCmd.PX * int64(time.Millisecond))\n\tcase putCmd.EXAT != 0:\n\t\tpc.HasEXAT = true\n\t\tpc.EXAT = time.Duration(putCmd.EXAT * float64(time.Second))\n\tcase putCmd.PXAT != 0:\n\t\tpc.HasPXAT = true\n\t\tpc.PXAT = time.Duration(putCmd.PXAT * int64(time.Millisecond))\n\t}\n\n\te := newEnv(s.ctx)\n\te.putConfig = &pc\n\te.dmap = putCmd.DMap\n\te.key = putCmd.Key\n\te.value = putCmd.Value\n\terr = dm.put(e)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\tconn.WriteString(protocol.StatusOK)\n}\n\nfunc (s *Service) 
putEntryCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tputEntryCmd, err := protocol.ParsePutEntryCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tdm, err := s.getOrCreateDMap(putEntryCmd.DMap)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\te := newEnv(s.ctx)\n\te.hkey = partitions.HKey(putEntryCmd.DMap, putEntryCmd.Key)\n\te.dmap = putEntryCmd.DMap\n\te.key = putEntryCmd.Key\n\te.value = putEntryCmd.Value\n\terr = dm.putOnReplicaFragment(e)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\tconn.WriteString(protocol.StatusOK)\n}\n"
  },
  {
    "path": "internal/dmap/put_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"context\"\n\t\"encoding/hex\"\n\t\"errors\"\n\t\"math/rand\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/config\"\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/testcluster\"\n\t\"github.com/olric-data/olric/internal/testutil\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestDMap_Put_Standalone(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\tdm, err := s.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < 10; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), nil)\n\t\trequire.NoError(t, err)\n\t}\n\n\tfor i := 0; i < 10; i++ {\n\t\tgr, err := dm.Get(ctx, testutil.ToKey(i))\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, testutil.ToVal(i), gr.Value())\n\t}\n}\n\nfunc TestDMap_Put_Cluster(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts1 := cluster.AddMember(nil).(*Service)\n\ts2 := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\n\tdm1, err := s1.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < 10; i++ {\n\t\terr = dm1.Put(ctx, testutil.ToKey(i), 
testutil.ToVal(i), nil)\n\t\trequire.NoError(t, err)\n\t}\n\n\tdm2, err := s2.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < 10; i++ {\n\t\tgr, err := dm2.Get(ctx, testutil.ToKey(i))\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, testutil.ToVal(i), gr.Value())\n\t}\n}\n\nfunc TestDMap_Put_AsyncReplicationMode(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\t// Create DMap services with custom configuration\n\tc1 := testutil.NewConfig()\n\tc1.ReplicationMode = config.AsyncReplicationMode\n\te1 := testcluster.NewEnvironment(c1)\n\ts1 := cluster.AddMember(e1).(*Service)\n\n\tc2 := testutil.NewConfig()\n\tc2.ReplicationMode = config.AsyncReplicationMode\n\te2 := testcluster.NewEnvironment(c2)\n\ts2 := cluster.AddMember(e2).(*Service)\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\n\tdm, err := s1.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < 10; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), nil)\n\t\trequire.NoError(t, err)\n\t}\n\n\t// Wait some time for async replication\n\t<-time.After(100 * time.Millisecond)\n\n\tdm2, err := s2.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < 10; i++ {\n\t\tgr, err := dm2.Get(ctx, testutil.ToKey(i))\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, testutil.ToVal(i), gr.Value())\n\t}\n}\n\nfunc TestDMap_Put_WriteQuorum(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\t// Create DMap services with custom configuration\n\tc1 := testutil.NewConfig()\n\tc1.ReplicaCount = 2\n\tc1.WriteQuorum = 2\n\te1 := testcluster.NewEnvironment(c1)\n\ts1 := cluster.AddMember(e1).(*Service)\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\tvar hit bool\n\tdm, err := s1.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < 10; i++ {\n\t\tkey := testutil.ToKey(i)\n\n\t\thkey := partitions.HKey(dm.name, key)\n\t\thost := dm.s.primary.PartitionByHKey(hkey).Owner()\n\t\tif s1.rt.This().CompareByID(host) 
{\n\t\t\terr = dm.Put(ctx, key, testutil.ToVal(i), nil)\n\t\t\tif err != ErrWriteQuorum {\n\t\t\t\tt.Fatalf(\"Expected ErrWriteQuorum. Got: %v\", err)\n\t\t\t}\n\t\t\thit = true\n\t\t}\n\t}\n\tif !hit {\n\t\tt.Fatalf(\"No keys checked on %v\", s1)\n\t}\n}\n\nfunc TestDMap_Put_PX(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts1 := cluster.AddMember(nil).(*Service)\n\ts2 := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\tdm1, err := s1.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tpc := &PutConfig{\n\t\tHasPX: true,\n\t\tPX:    time.Millisecond,\n\t}\n\tfor i := 0; i < 10; i++ {\n\t\terr = dm1.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), pc)\n\t\trequire.NoError(t, err)\n\t}\n\n\t<-time.After(10 * time.Millisecond)\n\n\tdm2, err := s2.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < 10; i++ {\n\t\t_, err := dm2.Get(ctx, testutil.ToKey(i))\n\t\tif err != ErrKeyNotFound {\n\t\t\tt.Fatalf(\"Expected ErrKeyNotFound. 
Got: %v\", err)\n\t\t}\n\t}\n}\n\nfunc TestDMap_Put_NX(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\tdm, err := s.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < 10; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), nil)\n\t\trequire.NoError(t, err)\n\t}\n\n\tpc := &PutConfig{\n\t\tHasNX: true,\n\t}\n\tfor i := 0; i < 10; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), testutil.ToVal(i*2), pc)\n\t\tif err == ErrKeyFound {\n\t\t\terr = nil\n\t\t}\n\t\trequire.NoError(t, err)\n\t}\n\n\tfor i := 0; i < 10; i++ {\n\t\tgr, err := dm.Get(ctx, testutil.ToKey(i))\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, testutil.ToVal(i), gr.Value())\n\t}\n}\n\nfunc TestDMap_Put_XX(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\tdm, err := s.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tpc := &PutConfig{\n\t\tHasXX: true,\n\t}\n\tfor i := 0; i < 10; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), testutil.ToVal(i*2), pc)\n\t\tif errors.Is(err, ErrKeyNotFound) {\n\t\t\terr = nil\n\t\t}\n\t\trequire.NoError(t, err)\n\t}\n\n\tfor i := 0; i < 10; i++ {\n\t\t_, err = dm.Get(ctx, testutil.ToKey(i))\n\t\tif !errors.Is(err, ErrKeyNotFound) {\n\t\t\tt.Fatalf(\"Expected ErrKeyNotFound. 
Got: %v\", err)\n\t\t}\n\t}\n}\n\nfunc TestDMap_Put_EX(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts1 := cluster.AddMember(nil).(*Service)\n\ts2 := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\tdm1, err := s1.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tpc := &PutConfig{\n\t\tHasEX: true,\n\t\tEX:    time.Second / 4,\n\t}\n\tfor i := 0; i < 10; i++ {\n\t\terr = dm1.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), pc)\n\t\trequire.NoError(t, err)\n\t}\n\n\t<-time.After(time.Second)\n\n\tdm2, err := s2.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < 10; i++ {\n\t\t_, err := dm2.Get(ctx, testutil.ToKey(i))\n\t\tif err != ErrKeyNotFound {\n\t\t\tt.Fatalf(\"Expected ErrKeyNotFound. Got: %v\", err)\n\t\t}\n\t}\n}\n\nfunc TestDMap_Put_EXAT(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts1 := cluster.AddMember(nil).(*Service)\n\ts2 := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\tdm1, err := s1.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tpc := &PutConfig{\n\t\tHasEXAT: true,\n\t\tEXAT:    time.Duration(time.Now().Add(time.Second).UnixNano()),\n\t}\n\tfor i := 0; i < 10; i++ {\n\t\terr = dm1.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), pc)\n\t\trequire.NoError(t, err)\n\t}\n\n\t<-time.After(time.Second)\n\n\tdm2, err := s2.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < 10; i++ {\n\t\t_, err := dm2.Get(ctx, testutil.ToKey(i))\n\t\trequire.ErrorIs(t, err, ErrKeyNotFound)\n\t}\n}\n\nfunc TestDMap_Put_PXAT(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts1 := cluster.AddMember(nil).(*Service)\n\ts2 := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\tdm1, err := s1.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tpc := &PutConfig{\n\t\tHasPXAT: true,\n\t\tPXAT:    
time.Duration(time.Now().Add(time.Millisecond).UnixNano()),\n\t}\n\tfor i := 0; i < 10; i++ {\n\t\terr = dm1.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), pc)\n\t\trequire.NoError(t, err)\n\t}\n\n\t<-time.After(10 * time.Millisecond)\n\n\tdm2, err := s2.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < 10; i++ {\n\t\t_, err := dm2.Get(ctx, testutil.ToKey(i))\n\t\trequire.ErrorIs(t, err, ErrKeyNotFound)\n\t}\n}\n\nfunc TestDMap_Put_ErrKeyTooLarge(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\tdm, err := s.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tdata := make([]byte, 300)\n\t_, err = rand.Read(data)\n\trequire.NoError(t, err)\n\tkey := hex.EncodeToString(data)\n\terr = dm.Put(ctx, key, \"value\", nil)\n\trequire.ErrorIs(t, err, ErrKeyTooLarge)\n}\n\nfunc TestDMap_Put_ErrEntryTooLarge(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\tdm, err := s.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tdata := make([]byte, 1<<21)\n\t_, err = rand.Read(data)\n\trequire.NoError(t, err)\n\n\terr = dm.Put(ctx, \"key\", data, nil)\n\trequire.ErrorIs(t, err, ErrEntryTooLarge)\n}\n\nfunc TestDMap_Put_PX_With_NX(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts1 := cluster.AddMember(nil).(*Service)\n\ts2 := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\tdm1, err := s1.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tpc := &PutConfig{\n\t\tHasPX: true,\n\t\tPX:    time.Minute,\n\t\tHasNX: true,\n\t}\n\tfor i := range 10 {\n\t\terr = dm1.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), pc)\n\t\trequire.NoError(t, err)\n\t}\n\n\t<-time.After(10 * time.Millisecond)\n\n\tdm2, err := s2.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfor i := range 10 {\n\t\tgr, 
err := dm2.Get(ctx, testutil.ToKey(i))\n\t\trequire.NoError(t, err)\n\t\tassert.NotZero(t, gr.TTL())\n\t}\n}\n"
  },
  {
    "path": "internal/dmap/scan_handlers.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"strconv\"\n\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/olric-data/olric/pkg/storage\"\n\t\"github.com/tidwall/redcon\"\n)\n\nfunc (dm *DMap) scanOnFragment(f *fragment, cursor uint64, sc *ScanConfig) ([]string, uint64, error) {\n\tf.Lock()\n\tdefer f.Unlock()\n\n\tvar items []string\n\tvar err error\n\n\tif sc.HasMatch {\n\t\tcursor, err = f.storage.ScanRegexMatch(cursor, sc.Match, sc.Count, func(e storage.Entry) bool {\n\t\t\titems = append(items, e.Key())\n\t\t\treturn true\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, 0, err\n\t\t}\n\t\treturn items, cursor, nil\n\t}\n\n\tcursor, err = f.storage.Scan(cursor, sc.Count, func(e storage.Entry) bool {\n\t\titems = append(items, e.Key())\n\t\treturn true\n\t})\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\treturn items, cursor, nil\n}\n\nfunc (dm *DMap) Scan(partID, cursor uint64, sc *ScanConfig) ([]string, uint64, error) {\n\tvar part *partitions.Partition\n\tif sc.Replica {\n\t\tpart = dm.s.backup.PartitionByID(partID)\n\t} else {\n\t\tpart = dm.s.primary.PartitionByID(partID)\n\t}\n\tf, err := dm.loadFragment(part)\n\tif err == errFragmentNotFound {\n\t\treturn nil, 0, nil\n\t}\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\treturn dm.scanOnFragment(f, cursor, 
sc)\n}\n\ntype ScanConfig struct {\n\tHasCount bool\n\tCount    int\n\tHasMatch bool\n\tMatch    string\n\tReplica  bool\n}\n\ntype ScanOption func(*ScanConfig)\n\nfunc Count(c int) ScanOption {\n\treturn func(cfg *ScanConfig) {\n\t\tcfg.HasCount = true\n\t\tcfg.Count = c\n\t}\n}\n\nfunc Match(s string) ScanOption {\n\treturn func(cfg *ScanConfig) {\n\t\tcfg.HasMatch = true\n\t\tcfg.Match = s\n\t}\n}\n\nfunc (s *Service) scanCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tscanCmd, err := protocol.ParseScanCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tdm, err := s.getOrCreateDMap(scanCmd.DMap)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tvar sc ScanConfig\n\tvar options []ScanOption\n\toptions = append(options, Count(scanCmd.Count))\n\n\tif scanCmd.Match != \"\" {\n\t\toptions = append(options, Match(scanCmd.Match))\n\t}\n\n\tfor _, opt := range options {\n\t\topt(&sc)\n\t}\n\tsc.Replica = scanCmd.Replica\n\n\tvar result []string\n\tvar cursor uint64\n\tresult, cursor, err = dm.Scan(scanCmd.PartID, scanCmd.Cursor, &sc)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\tconn.WriteArray(2)\n\tconn.WriteBulkString(strconv.FormatUint(cursor, 10))\n\tconn.WriteArray(len(result))\n\tfor _, i := range result {\n\t\tconn.WriteBulkString(i)\n\t}\n}\n"
  },
  {
    "path": "internal/dmap/scan_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/olric-data/olric/internal/testcluster\"\n\t\"github.com/olric-data/olric/internal/testutil\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc testScanIterator(t *testing.T, s *Service, allKeys map[string]bool, sc *ScanConfig) int {\n\tif sc == nil {\n\t\tsc = &ScanConfig{}\n\t}\n\tctx := context.Background()\n\trc := s.client.Get(s.rt.This().String())\n\n\tvar totalKeys int\n\tvar partID, cursor uint64\n\tfor {\n\t\tr := protocol.NewScan(partID, \"mydmap\", cursor)\n\t\tif sc.Replica {\n\t\t\tr.SetReplica()\n\t\t}\n\t\tif sc.HasMatch {\n\t\t\tr.SetMatch(sc.Match)\n\t\t}\n\t\tcmd := r.Command(ctx)\n\t\terr := rc.Process(ctx, cmd)\n\t\trequire.NoError(t, err)\n\n\t\tvar keys []string\n\t\tkeys, cursor, err = cmd.Result()\n\t\trequire.NoError(t, err)\n\t\ttotalKeys += len(keys)\n\n\t\tfor _, key := range keys {\n\t\t\t_, ok := allKeys[key]\n\t\t\trequire.True(t, ok)\n\t\t\tallKeys[key] = true\n\t\t}\n\t\tif cursor == 0 {\n\t\t\tif partID+1 < s.config.PartitionCount {\n\t\t\t\tpartID++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\treturn totalKeys\n}\n\nfunc TestDMap_scanCommandHandler_Standalone(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := 
cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\tdm, err := s.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tallKeys := make(map[string]bool)\n\n\tfor i := 0; i < 100; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), i, nil)\n\t\trequire.NoError(t, err)\n\n\t\tallKeys[testutil.ToKey(i)] = false\n\t}\n\n\ttotalKeys := testScanIterator(t, s, allKeys, nil)\n\trequire.Equal(t, 100, totalKeys)\n\tfor _, value := range allKeys {\n\t\trequire.True(t, value)\n\t}\n}\n\nfunc TestDMap_scanCommandHandler_Cluster(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\n\tc1 := testutil.NewConfig()\n\tc1.ReplicaCount = 2\n\tc1.WriteQuorum = 2\n\te1 := testcluster.NewEnvironment(c1)\n\ts1 := cluster.AddMember(e1).(*Service)\n\n\tc2 := testutil.NewConfig()\n\tc2.ReplicaCount = 2\n\tc2.WriteQuorum = 2\n\te2 := testcluster.NewEnvironment(c2)\n\ts2 := cluster.AddMember(e2).(*Service)\n\n\tdefer cluster.Shutdown()\n\n\tctx := context.Background()\n\tdm, err := s1.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tallKeys := make(map[string]bool)\n\tfor i := 0; i < 100; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), i, nil)\n\t\trequire.NoError(t, err)\n\n\t\tallKeys[testutil.ToKey(i)] = false\n\t}\n\n\tt.Run(\"Scan on primary copies\", func(t *testing.T) {\n\t\tvar totalKeys int\n\t\ttotalKeys += testScanIterator(t, s1, allKeys, nil)\n\t\ttotalKeys += testScanIterator(t, s2, allKeys, nil)\n\n\t\trequire.Equal(t, 100, totalKeys)\n\t\tfor _, value := range allKeys {\n\t\t\trequire.True(t, value)\n\t\t}\n\t})\n\n\tt.Run(\"Scan on replicas\", func(t *testing.T) {\n\t\tvar totalKeys int\n\t\tsc := &ScanConfig{Replica: true}\n\t\ttotalKeys += testScanIterator(t, s1, allKeys, sc)\n\t\ttotalKeys += testScanIterator(t, s2, allKeys, sc)\n\n\t\trequire.Equal(t, 100, totalKeys)\n\t\tfor _, value := range allKeys {\n\t\t\trequire.True(t, value)\n\t\t}\n\t})\n}\n\nfunc TestDMap_scanCommandHandler_match(t *testing.T) {\n\tcluster := 
testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tdm, err := s.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tevenKeys := make(map[string]bool)\n\tfor i := 0; i < 100; i++ {\n\t\tvar key string\n\t\tif i%2 == 0 {\n\t\t\tkey = fmt.Sprintf(\"even:%s\", testutil.ToKey(i))\n\t\t\tevenKeys[key] = false\n\t\t} else {\n\t\t\tkey = fmt.Sprintf(\"odd:%s\", testutil.ToKey(i))\n\t\t}\n\t\terr = dm.Put(ctx, key, i, nil)\n\t\trequire.NoError(t, err)\n\t}\n\n\tsc := &ScanConfig{\n\t\tHasMatch: true,\n\t\tMatch:    \"^even:\",\n\t}\n\ttotalKeys := testScanIterator(t, s, evenKeys, sc)\n\trequire.Equal(t, 50, totalKeys)\n\tfor _, value := range evenKeys {\n\t\trequire.True(t, value)\n\t}\n}\n\nfunc TestDMap_scanCommandHandler_count(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tdm, err := s.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tctx := context.Background()\n\tfor i := 0; i < 100; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), i, nil)\n\t\trequire.NoError(t, err)\n\t}\n\n\trc := s.client.Get(s.rt.This().String())\n\tvar partID, cursor uint64\n\tr := protocol.NewScan(partID, \"mydmap\", cursor)\n\tr.SetCount(5)\n\tcmd := r.Command(ctx)\n\terr = rc.Process(ctx, cmd)\n\trequire.NoError(t, err)\n\n\tvar keys []string\n\tkeys, _, err = cmd.Result()\n\trequire.NoError(t, err)\n\trequire.Len(t, keys, 5)\n}\n"
  },
  {
    "path": "internal/dmap/service.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"reflect\"\n\t\"sync\"\n\n\t\"github.com/olric-data/olric/config\"\n\t\"github.com/olric-data/olric/events\"\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/cluster/routingtable\"\n\t\"github.com/olric-data/olric/internal/environment\"\n\t\"github.com/olric-data/olric/internal/locker\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/olric-data/olric/internal/server\"\n\t\"github.com/olric-data/olric/internal/service\"\n\t\"github.com/olric-data/olric/pkg/flog\"\n\t\"github.com/olric-data/olric/pkg/storage\"\n)\n\nvar errFragmentNotFound = errors.New(\"fragment not found\")\n\ntype storageMap struct {\n\tengines map[string]storage.Engine\n\tconfigs map[string]map[string]interface{}\n}\n\ntype Service struct {\n\tsync.RWMutex // protects dmaps map\n\n\tlog     *flog.Logger\n\tconfig  *config.Config\n\tclient  *server.Client\n\tserver  *server.Server\n\trt      *routingtable.RoutingTable\n\tprimary *partitions.Partitions\n\tbackup  *partitions.Partitions\n\tlocker  *locker.Locker\n\tdmaps   map[string]*DMap\n\tstorage *storageMap\n\twg      sync.WaitGroup\n\tctx     context.Context\n\tcancel  context.CancelFunc\n}\n\nfunc registerErrors() {\n\tprotocol.SetError(\"NOSUCHLOCK\", 
ErrNoSuchLock)\n\tprotocol.SetError(\"LOCKNOTACQUIRED\", ErrLockNotAcquired)\n\tprotocol.SetError(\"READQUORUM\", ErrReadQuorum)\n\tprotocol.SetError(\"WRITEQUORUM\", ErrWriteQuorum)\n\tprotocol.SetError(\"DMAPNOTFOUND\", ErrDMapNotFound)\n\tprotocol.SetError(\"KEYTOOLARGE\", ErrKeyTooLarge)\n\tprotocol.SetError(\"ENTRYTOOLARGE\", ErrEntryTooLarge)\n\tprotocol.SetError(\"KEYNOTFOUND\", ErrKeyNotFound)\n\tprotocol.SetError(\"KEYFOUND\", ErrKeyFound)\n}\n\nfunc NewService(e *environment.Environment) (service.Service, error) {\n\tctx, cancel := context.WithCancel(context.Background())\n\ts := &Service{\n\t\tconfig:  e.Get(\"config\").(*config.Config),\n\t\tclient:  e.Get(\"client\").(*server.Client),\n\t\tserver:  e.Get(\"server\").(*server.Server),\n\t\tlog:     e.Get(\"logger\").(*flog.Logger),\n\t\trt:      e.Get(\"routingtable\").(*routingtable.RoutingTable),\n\t\tprimary: e.Get(\"primary\").(*partitions.Partitions),\n\t\tbackup:  e.Get(\"backup\").(*partitions.Partitions),\n\t\tlocker:  e.Get(\"locker\").(*locker.Locker),\n\t\tstorage: &storageMap{\n\t\t\tengines: make(map[string]storage.Engine),\n\t\t\tconfigs: make(map[string]map[string]interface{}),\n\t\t},\n\t\tdmaps:  make(map[string]*DMap),\n\t\tctx:    ctx,\n\t\tcancel: cancel,\n\t}\n\tregisterErrors()\n\ts.RegisterHandlers()\n\treturn s, nil\n}\n\nfunc (s *Service) isAlive() bool {\n\tselect {\n\tcase <-s.ctx.Done():\n\t\t// The node is gone.\n\t\treturn false\n\tdefault:\n\t}\n\treturn true\n}\n\nfunc getType(data interface{}) string {\n\tt := reflect.TypeOf(data)\n\tif t.Kind() == reflect.Ptr {\n\t\treturn t.Elem().Name()\n\t}\n\treturn t.Name()\n}\n\nfunc (s *Service) publishEvent(e events.Event) {\n\tdefer s.wg.Done()\n\n\trc := s.client.Get(s.rt.This().String())\n\tdata, err := e.Encode()\n\tif err != nil {\n\t\ts.log.V(3).Printf(\"[ERROR] Failed to encode %s: %v\", getType(e), err)\n\t\treturn\n\t}\n\terr = rc.Publish(s.ctx, events.ClusterEventsChannel, data).Err()\n\tif err != nil 
{\n\t\ts.log.V(3).Printf(\"[ERROR] Failed to publish %s to %s: %v\",\n\t\t\tgetType(e), events.ClusterEventsChannel, err)\n\t}\n}\n\n// Start starts the distributed map service.\nfunc (s *Service) Start() error {\n\ts.wg.Add(1)\n\tgo s.janitorWorker()\n\n\ts.wg.Add(1)\n\tgo s.compactionWorker()\n\n\ts.wg.Add(1)\n\tgo s.evictKeysAtBackground()\n\n\treturn nil\n}\n\nfunc (s *Service) Shutdown(ctx context.Context) error {\n\ts.cancel()\n\tdone := make(chan struct{})\n\n\tgo func() {\n\t\ts.wg.Wait()\n\t\tclose(done)\n\t}()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\terr := ctx.Err()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase <-done:\n\t}\n\treturn nil\n}\n\nvar _ service.Service = (*Service)(nil)\n"
  },
  {
    "path": "internal/dmap/service_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"context\"\n\t\"testing\"\n\n\t\"github.com/olric-data/olric/internal/testcluster\"\n)\n\nfunc TestDMapService(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\tdefer cluster.Shutdown()\n\ts, ok := cluster.AddMember(nil).(*Service)\n\tif !ok {\n\t\tt.Fatal(\"AddMember returned a different service.Service implementation\")\n\t}\n\n\terr := s.Shutdown(context.Background())\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n}\n"
  },
  {
    "path": "internal/dmap/stats_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage dmap\n\nimport (\n\t\"context\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/testcluster\"\n\t\"github.com/olric-data/olric/internal/testutil\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestDMap_Stats(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\tdm, err := s.NewDMap(\"mymap\")\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. 
Got: %v\", err)\n\t}\n\n\tctx := context.Background()\n\t// EntriesTotal\n\tfor i := 0; i < 10; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), nil)\n\t\trequire.NoError(t, err)\n\t}\n\n\t//GetHits\n\tfor i := 0; i < 10; i++ {\n\t\t_, err = dm.Get(ctx, testutil.ToKey(i))\n\t\trequire.NoError(t, err)\n\t}\n\n\t// DeleteHits\n\tfor i := 0; i < 10; i++ {\n\t\t_, err = dm.Delete(ctx, testutil.ToKey(i))\n\t\trequire.NoError(t, err)\n\t}\n\n\t// GetMisses\n\tfor i := 0; i < 10; i++ {\n\t\t_, err = dm.Get(ctx, testutil.ToKey(i))\n\t\trequire.ErrorIs(t, err, ErrKeyNotFound)\n\t}\n\n\t// DeleteMisses\n\tfor i := 0; i < 10; i++ {\n\t\t_, err = dm.Delete(ctx, testutil.ToKey(i))\n\t\trequire.NoError(t, err)\n\t}\n\n\tpc := &PutConfig{\n\t\tHasEX: true,\n\t\tEX:    time.Millisecond,\n\t}\n\t// EntriesTotal, EvictedTotal\n\tfor i := 0; i < 10; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), testutil.ToVal(i), pc)\n\t\trequire.NoError(t, err)\n\t}\n\n\t<-time.After(100 * time.Millisecond)\n\n\t// GetMisses\n\tfor i := 0; i < 10; i++ {\n\t\t_, err = dm.Get(ctx, testutil.ToKey(i))\n\t\trequire.ErrorIs(t, err, ErrKeyNotFound)\n\t}\n\n\tstats := map[string]int64{\n\t\t\"EntriesTotal\": EntriesTotal.Read(),\n\t\t\"GetMisses\":    GetMisses.Read(),\n\t\t\"GetHits\":      GetHits.Read(),\n\t\t\"DeleteHits\":   DeleteHits.Read(),\n\t\t\"DeleteMisses\": DeleteMisses.Read(),\n\t\t\"EvictedTotal\": EvictedTotal.Read(),\n\t}\n\tfor name, value := range stats {\n\t\tif value <= 0 {\n\t\t\tt.Fatalf(\"Expected %s has to be bigger than zero\", name)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "internal/environment/environment.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage environment\n\nimport \"sync\"\n\ntype Environment struct {\n\tsync.RWMutex\n\n\tm map[string]interface{}\n}\n\nfunc New() *Environment {\n\treturn &Environment{\n\t\tm: make(map[string]interface{}),\n\t}\n}\n\nfunc (e *Environment) Get(key string) interface{} {\n\te.RLock()\n\tdefer e.RUnlock()\n\n\tvalue, ok := e.m[key]\n\tif ok {\n\t\treturn value\n\t}\n\treturn nil\n}\n\nfunc (e *Environment) Set(key string, value interface{}) {\n\te.Lock()\n\tdefer e.Unlock()\n\n\te.m[key] = value\n}\n\nfunc (e *Environment) Clone() *Environment {\n\te.RLock()\n\tdefer e.RUnlock()\n\n\tf := New()\n\tfor key, value := range e.m {\n\t\tf.Set(key, value)\n\t}\n\treturn f\n}\n"
  },
  {
    "path": "internal/environment/environment_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage environment\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\ntype envTest struct {\n\tnumber uint64\n}\n\nfunc TestEnvironment(t *testing.T) {\n\te := New()\n\n\tst := &envTest{\n\t\tnumber: 1988,\n\t}\n\n\te.Set(\"my-struct\", st)\n\te.Set(\"my-string\", \"value\")\n\te.Set(\"my-uint64\", uint64(4576))\n\n\tstructVal := e.Get(\"my-struct\")\n\trequire.Equal(t, uint64(1988), structVal.(*envTest).number)\n\n\tstringVal := e.Get(\"my-string\")\n\trequire.Equal(t, \"value\", stringVal)\n\n\tstringUint64 := e.Get(\"my-uint64\")\n\trequire.Equal(t, uint64(4576), stringUint64.(uint64))\n\n\tt.Run(\"Clone\", func(t *testing.T) {\n\t\tclone := e.Clone()\n\t\trequire.Equal(t, e, clone)\n\t})\n}\n"
  },
  {
    "path": "internal/locker/locker.go",
    "content": "/*\nPackage locker provides a mechanism for creating finer-grained locking to help\nfree up more global locks to handle other tasks.\n\nThe implementation looks close to a sync.Mutex, however the user must provide a\nreference to use to refer to the underlying lock when locking and unlocking,\nand unlock may generate an error.\n\nIf a lock with a given name does not exist when `Lock` is called, one is\ncreated.\nLock references are automatically cleaned up on `Unlock` if nothing else is\nwaiting for the lock.\n*/\npackage locker\n\nimport (\n\t\"errors\"\n\t\"sync\"\n\t\"sync/atomic\"\n)\n\n// ErrNoSuchLock is returned when the requested lock does not exist\nvar ErrNoSuchLock = errors.New(\"no such lock\")\n\n// Locker provides a locking mechanism based on the passed in reference name\ntype Locker struct {\n\tmu    sync.Mutex\n\tlocks map[string]*lockCtr\n}\n\n// lockCtr is used by Locker to represent a lock with a given name.\ntype lockCtr struct {\n\tmu sync.Mutex\n\t// waiters is the number of waiters waiting to acquire the lock\n\t// this is int32 instead of uint32 so we can add `-1` in `dec()`\n\twaiters int32\n}\n\n// inc increments the number of waiters waiting for the lock\nfunc (l *lockCtr) inc() {\n\tatomic.AddInt32(&l.waiters, 1)\n}\n\n// dec decrements the number of waiters waiting on the lock\nfunc (l *lockCtr) dec() {\n\tatomic.AddInt32(&l.waiters, -1)\n}\n\n// count gets the current number of waiters\nfunc (l *lockCtr) count() int32 {\n\treturn atomic.LoadInt32(&l.waiters)\n}\n\n// Lock locks the mutex\nfunc (l *lockCtr) Lock() {\n\tl.mu.Lock()\n}\n\n// Unlock unlocks the mutex\nfunc (l *lockCtr) Unlock() {\n\tl.mu.Unlock()\n}\n\n// New creates a new Locker\nfunc New() *Locker {\n\treturn &Locker{\n\t\tlocks: make(map[string]*lockCtr),\n\t}\n}\n\n// Lock locks a mutex with the given name. 
If it doesn't exist, one is created\nfunc (l *Locker) Lock(name string) {\n\tl.mu.Lock()\n\tif l.locks == nil {\n\t\tl.locks = make(map[string]*lockCtr)\n\t}\n\n\tnameLock, exists := l.locks[name]\n\tif !exists {\n\t\tnameLock = &lockCtr{}\n\t\tl.locks[name] = nameLock\n\t}\n\n\t// increment the nameLock waiters while inside the main mutex\n\t// this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently\n\tnameLock.inc()\n\tl.mu.Unlock()\n\n\t// Lock the nameLock outside the main mutex so we don't block other operations\n\t// once locked then we can decrement the number of waiters for this lock\n\tnameLock.Lock()\n\tnameLock.dec()\n}\n\n// Unlock unlocks the mutex with the given name\n// If the given lock is not being waited on by any other callers, it is deleted\nfunc (l *Locker) Unlock(name string) error {\n\tl.mu.Lock()\n\tnameLock, exists := l.locks[name]\n\tif !exists {\n\t\tl.mu.Unlock()\n\t\treturn ErrNoSuchLock\n\t}\n\n\tif nameLock.count() == 0 {\n\t\tdelete(l.locks, name)\n\t}\n\tnameLock.Unlock()\n\n\tl.mu.Unlock()\n\treturn nil\n}\n"
  },
  {
    "path": "internal/locker/locker_test.go",
    "content": "package locker\n\nimport (\n\t\"math/rand\"\n\t\"strconv\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestLockCounter(t *testing.T) {\n\tl := &lockCtr{}\n\tl.inc()\n\n\tif l.waiters != 1 {\n\t\tt.Fatal(\"counter inc failed\")\n\t}\n\n\tl.dec()\n\tif l.waiters != 0 {\n\t\tt.Fatal(\"counter dec failed\")\n\t}\n}\n\nfunc TestLockerLock(t *testing.T) {\n\tl := New()\n\tl.Lock(\"test\")\n\tctr := l.locks[\"test\"]\n\n\tif ctr.count() != 0 {\n\t\tt.Fatalf(\"expected waiters to be 0, got :%d\", ctr.waiters)\n\t}\n\n\tchDone := make(chan struct{})\n\tgo func() {\n\t\tl.Lock(\"test\")\n\t\tclose(chDone)\n\t}()\n\n\tchWaiting := make(chan struct{})\n\tgo func() {\n\t\tfor range time.Tick(1 * time.Millisecond) {\n\t\t\tif ctr.count() == 1 {\n\t\t\t\tclose(chWaiting)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-chWaiting:\n\tcase <-time.After(3 * time.Second):\n\t\tt.Fatal(\"timed out waiting for lock waiters to be incremented\")\n\t}\n\n\tselect {\n\tcase <-chDone:\n\t\tt.Fatal(\"lock should not have returned while it was still held\")\n\tdefault:\n\t}\n\n\tif err := l.Unlock(\"test\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tselect {\n\tcase <-chDone:\n\tcase <-time.After(3 * time.Second):\n\t\tt.Fatalf(\"lock should have completed\")\n\t}\n\n\tif ctr.count() != 0 {\n\t\tt.Fatalf(\"expected waiters to be 0, got: %d\", ctr.count())\n\t}\n}\n\nfunc TestLockerUnlock(t *testing.T) {\n\tl := New()\n\n\tl.Lock(\"test\")\n\tl.Unlock(\"test\")\n\n\tchDone := make(chan struct{})\n\tgo func() {\n\t\tl.Lock(\"test\")\n\t\tclose(chDone)\n\t}()\n\n\tselect {\n\tcase <-chDone:\n\tcase <-time.After(3 * time.Second):\n\t\tt.Fatalf(\"lock should not be blocked\")\n\t}\n}\n\nfunc TestLockerConcurrency(t *testing.T) {\n\tl := New()\n\n\tvar wg sync.WaitGroup\n\tfor i := 0; i <= 10000; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tl.Lock(\"test\")\n\t\t\t// if there is a concurrency issue, will very likely panic 
here\n\t\t\tl.Unlock(\"test\")\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\tchDone := make(chan struct{})\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(chDone)\n\t}()\n\n\tselect {\n\tcase <-chDone:\n\tcase <-time.After(10 * time.Second):\n\t\tt.Fatal(\"timeout waiting for locks to complete\")\n\t}\n\n\t// Since everything has unlocked this should not exist anymore\n\tif ctr, exists := l.locks[\"test\"]; exists {\n\t\tt.Fatalf(\"lock should not exist: %v\", ctr)\n\t}\n}\n\nfunc BenchmarkLocker(b *testing.B) {\n\tl := New()\n\tfor i := 0; i < b.N; i++ {\n\t\tl.Lock(\"test\")\n\t\tl.Unlock(\"test\")\n\t}\n}\n\nfunc BenchmarkLockerParallel(b *testing.B) {\n\tl := New()\n\tb.SetParallelism(128)\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\tl.Lock(\"test\")\n\t\t\tl.Unlock(\"test\")\n\t\t}\n\t})\n}\n\nfunc BenchmarkLockerMoreKeys(b *testing.B) {\n\tl := New()\n\tvar keys []string\n\tfor i := 0; i < 64; i++ {\n\t\tkeys = append(keys, strconv.Itoa(i))\n\t}\n\tb.SetParallelism(128)\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\tk := keys[rand.Intn(len(keys))]\n\t\t\tl.Lock(k)\n\t\t\tl.Unlock(k)\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "internal/protocol/cluster.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage protocol\n\nimport (\n\t\"context\"\n\n\t\"github.com/redis/go-redis/v9\"\n\t\"github.com/tidwall/redcon\"\n)\n\ntype ClusterRoutingTable struct{}\n\nfunc NewClusterRoutingTable() *ClusterRoutingTable {\n\treturn &ClusterRoutingTable{}\n}\n\nfunc (c *ClusterRoutingTable) Command(ctx context.Context) *redis.Cmd {\n\tvar args []interface{}\n\targs = append(args, Cluster.RoutingTable)\n\treturn redis.NewCmd(ctx, args...)\n}\n\nfunc ParseClusterRoutingTable(cmd redcon.Command) (*ClusterRoutingTable, error) {\n\tif len(cmd.Args) > 1 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\tc := NewClusterRoutingTable()\n\treturn c, nil\n}\n\ntype ClusterMembers struct{}\n\nfunc NewClusterMembers() *ClusterMembers {\n\treturn &ClusterMembers{}\n}\n\nfunc (c *ClusterMembers) Command(ctx context.Context) *redis.Cmd {\n\tvar args []interface{}\n\targs = append(args, Cluster.Members)\n\treturn redis.NewCmd(ctx, args...)\n}\n\nfunc ParseClusterMembers(cmd redcon.Command) (*ClusterMembers, error) {\n\tif len(cmd.Args) > 1 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\tc := NewClusterMembers()\n\treturn c, nil\n}\n"
  },
  {
    "path": "internal/protocol/cluster_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage protocol\n\nimport (\n\t\"context\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestProtocol_ClusterRoutingTable(t *testing.T) {\n\trtCmd := NewClusterRoutingTable()\n\n\tcmd := stringToCommand(rtCmd.Command(context.Background()).String())\n\t_, err := ParseClusterRoutingTable(cmd)\n\trequire.NoError(t, err)\n\n\tt.Run(\"CLUSTER.ROUTINGTABLE invalid command\", func(t *testing.T) {\n\t\tcmd := stringToCommand(\"cluster routing table foobar\")\n\t\t_, err = ParseClusterRoutingTable(cmd)\n\t\trequire.Error(t, err)\n\t})\n}\n\nfunc TestProtocol_ClusterMembers(t *testing.T) {\n\tmembersCmd := NewClusterMembers()\n\n\tcmd := stringToCommand(membersCmd.Command(context.Background()).String())\n\t_, err := ParseClusterMembers(cmd)\n\trequire.NoError(t, err)\n\n\tt.Run(\"CLUSTER.MEMBERS invalid command\", func(t *testing.T) {\n\t\tcmd := stringToCommand(\"cluster members foobar\")\n\t\t_, err = ParseClusterMembers(cmd)\n\t\trequire.Error(t, err)\n\t})\n}\n"
  },
  {
    "path": "internal/protocol/commands.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage protocol\n\nconst StatusOK = \"OK\"\n\ntype ClusterCommands struct {\n\tRoutingTable string\n\tMembers      string\n}\n\nvar Cluster = &ClusterCommands{\n\tRoutingTable: \"cluster.routingtable\",\n\tMembers:      \"cluster.members\",\n}\n\ntype InternalCommands struct {\n\tMoveFragment        string\n\tUpdateRouting       string\n\tLengthOfPart        string\n\tClusterRoutingTable string\n}\n\nvar Internal = &InternalCommands{\n\tMoveFragment:  \"internal.node.movefragment\",\n\tUpdateRouting: \"internal.node.updaterouting\",\n\tLengthOfPart:  \"internal.node.lengthofpart\",\n}\n\ntype GenericCommands struct {\n\tPing  string\n\tStats string\n\tAuth  string\n}\n\nvar Generic = &GenericCommands{\n\tPing:  \"ping\",\n\tStats: \"stats\",\n\tAuth:  \"auth\",\n}\n\ntype DMapCommands struct {\n\tGet         string\n\tGetEntry    string\n\tPut         string\n\tPutEntry    string\n\tDel         string\n\tDelEntry    string\n\tExpire      string\n\tPExpire     string\n\tDestroy     string\n\tQuery       string\n\tIncr        string\n\tDecr        string\n\tGetPut      string\n\tIncrByFloat string\n\tLock        string\n\tUnlock      string\n\tLockLease   string\n\tPLockLease  string\n\tScan        string\n}\n\nvar DMap = &DMapCommands{\n\tGet:         \"dm.get\",\n\tGetEntry:    \"dm.getentry\",\n\tPut:         \"dm.put\",\n\tPutEntry:    
\"dm.putentry\",\n\tDel:         \"dm.del\",\n\tDelEntry:    \"dm.delentry\",\n\tExpire:      \"dm.expire\",\n\tPExpire:     \"dm.pexpire\",\n\tDestroy:     \"dm.destroy\",\n\tIncr:        \"dm.incr\",\n\tDecr:        \"dm.decr\",\n\tGetPut:      \"dm.getput\",\n\tIncrByFloat: \"dm.incrbyfloat\",\n\tLock:        \"dm.lock\",\n\tUnlock:      \"dm.unlock\",\n\tLockLease:   \"dm.locklease\",\n\tPLockLease:  \"dm.plocklease\",\n\tScan:        \"dm.scan\",\n}\n\ntype PubSubCommands struct {\n\tPubSub          string\n\tPublish         string\n\tPublishInternal string\n\tSubscribe       string\n\tPSubscribe      string\n\tPubSubChannels  string\n\tPubSubNumpat    string\n\tPubSubNumsub    string\n}\n\nvar PubSub = &PubSubCommands{\n\tPubSub:          \"pubsub\",\n\tPublish:         \"publish\",\n\tPublishInternal: \"publish.internal\",\n\tSubscribe:       \"subscribe\",\n\tPSubscribe:      \"psubscribe\",\n\tPubSubChannels:  \"pubsub channels\",\n\tPubSubNumpat:    \"pubsub numpat\",\n\tPubSubNumsub:    \"pubsub numsub\",\n}\n"
  },
  {
    "path": "internal/protocol/dmap.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage protocol\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/util\"\n\t\"github.com/redis/go-redis/v9\"\n\t\"github.com/tidwall/redcon\"\n)\n\ntype Put struct {\n\tDMap  string\n\tKey   string\n\tValue []byte\n\tEX    float64\n\tPX    int64\n\tEXAT  float64\n\tPXAT  int64\n\tNX    bool\n\tXX    bool\n}\n\nfunc NewPut(dmap, key string, value []byte) *Put {\n\treturn &Put{\n\t\tDMap:  dmap,\n\t\tKey:   key,\n\t\tValue: value,\n\t}\n}\n\nfunc (p *Put) SetEX(ex float64) *Put {\n\tp.EX = ex\n\treturn p\n}\n\nfunc (p *Put) SetPX(px int64) *Put {\n\tp.PX = px\n\treturn p\n}\n\nfunc (p *Put) SetEXAT(exat float64) *Put {\n\tp.EXAT = exat\n\treturn p\n}\n\nfunc (p *Put) SetPXAT(pxat int64) *Put {\n\tp.PXAT = pxat\n\treturn p\n}\n\nfunc (p *Put) SetNX() *Put {\n\tp.NX = true\n\treturn p\n}\n\nfunc (p *Put) SetXX() *Put {\n\tp.XX = true\n\treturn p\n}\n\nfunc (p *Put) Command(ctx context.Context) *redis.StatusCmd {\n\tvar args []interface{}\n\targs = append(args, DMap.Put)\n\targs = append(args, p.DMap)\n\targs = append(args, p.Key)\n\targs = append(args, p.Value)\n\n\tif p.EX != 0 {\n\t\targs = append(args, \"EX\")\n\t\targs = append(args, p.EX)\n\t}\n\n\tif p.PX != 0 {\n\t\targs = append(args, \"PX\")\n\t\targs = append(args, p.PX)\n\t}\n\n\tif p.EXAT != 0 
{\n\t\targs = append(args, \"EXAT\")\n\t\targs = append(args, p.EXAT)\n\t}\n\n\tif p.PXAT != 0 {\n\t\targs = append(args, \"PXAT\")\n\t\targs = append(args, p.PXAT)\n\t}\n\n\tif p.NX {\n\t\targs = append(args, \"NX\")\n\t}\n\n\tif p.XX {\n\t\targs = append(args, \"XX\")\n\t}\n\n\treturn redis.NewStatusCmd(ctx, args...)\n}\n\nfunc ParsePutCommand(cmd redcon.Command) (*Put, error) {\n\tif len(cmd.Args) < 4 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\tp := NewPut(\n\t\tutil.BytesToString(cmd.Args[1]), // DMap\n\t\tutil.BytesToString(cmd.Args[2]), // Key\n\t\tcmd.Args[3],                     // Value\n\t)\n\n\targs := cmd.Args[4:]\n\tfor len(args) > 0 {\n\t\tswitch arg := strings.ToUpper(util.BytesToString(args[0])); arg {\n\t\tcase \"NX\":\n\t\t\tp.SetNX()\n\t\t\targs = args[1:]\n\t\t\tcontinue\n\t\tcase \"XX\":\n\t\t\tp.SetXX()\n\t\t\targs = args[1:]\n\t\t\tcontinue\n\t\tcase \"PX\":\n\t\t\tpx, err := strconv.ParseInt(util.BytesToString(args[1]), 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tp.SetPX(px)\n\t\t\targs = args[2:]\n\t\t\tcontinue\n\t\tcase \"EX\":\n\t\t\tex, err := strconv.ParseFloat(util.BytesToString(args[1]), 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tp.SetEX(ex)\n\t\t\targs = args[2:]\n\t\t\tcontinue\n\t\tcase \"EXAT\":\n\t\t\texat, err := strconv.ParseFloat(util.BytesToString(args[1]), 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tp.SetEXAT(exat)\n\t\t\targs = args[2:]\n\t\t\tcontinue\n\t\tcase \"PXAT\":\n\t\t\tpxat, err := strconv.ParseInt(util.BytesToString(args[1]), 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tp.SetPXAT(pxat)\n\t\t\targs = args[2:]\n\t\t\tcontinue\n\t\tdefault:\n\t\t\treturn nil, errors.New(\"syntax error\")\n\t\t}\n\t}\n\n\treturn p, nil\n}\n\ntype PutEntry struct {\n\tDMap  string\n\tKey   string\n\tValue []byte\n}\n\nfunc NewPutEntry(dmap, key string, value []byte) *PutEntry {\n\treturn &PutEntry{\n\t\tDMap:  
dmap,\n\t\tKey:   key,\n\t\tValue: value,\n\t}\n}\n\nfunc (p *PutEntry) Command(ctx context.Context) *redis.StatusCmd {\n\tvar args []interface{}\n\targs = append(args, DMap.PutEntry)\n\targs = append(args, p.DMap)\n\targs = append(args, p.Key)\n\targs = append(args, p.Value)\n\treturn redis.NewStatusCmd(ctx, args...)\n}\n\nfunc ParsePutEntryCommand(cmd redcon.Command) (*PutEntry, error) {\n\tif len(cmd.Args) < 4 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\treturn NewPutEntry(\n\t\tutil.BytesToString(cmd.Args[1]),\n\t\tutil.BytesToString(cmd.Args[2]),\n\t\tcmd.Args[3],\n\t), nil\n}\n\ntype Get struct {\n\tDMap string\n\tKey  string\n\tRaw  bool\n}\n\nfunc NewGet(dmap, key string) *Get {\n\treturn &Get{\n\t\tDMap: dmap,\n\t\tKey:  key,\n\t}\n}\n\nfunc (g *Get) SetRaw() *Get {\n\tg.Raw = true\n\treturn g\n}\n\nfunc (g *Get) Command(ctx context.Context) *redis.StringCmd {\n\tvar args []interface{}\n\targs = append(args, DMap.Get)\n\targs = append(args, g.DMap)\n\targs = append(args, g.Key)\n\tif g.Raw {\n\t\targs = append(args, \"RW\")\n\t}\n\treturn redis.NewStringCmd(ctx, args...)\n}\n\nfunc ParseGetCommand(cmd redcon.Command) (*Get, error) {\n\tif len(cmd.Args) < 3 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\tg := NewGet(\n\t\tutil.BytesToString(cmd.Args[1]),\n\t\tutil.BytesToString(cmd.Args[2]),\n\t)\n\n\tif len(cmd.Args) == 4 {\n\t\targ := util.BytesToString(cmd.Args[3])\n\t\tif arg == \"RW\" {\n\t\t\tg.SetRaw()\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"%w: %s\", ErrInvalidArgument, arg)\n\t\t}\n\t}\n\n\treturn g, nil\n}\n\ntype GetEntry struct {\n\tDMap    string\n\tKey     string\n\tReplica bool\n}\n\nfunc NewGetEntry(dmap, key string) *GetEntry {\n\treturn &GetEntry{\n\t\tDMap: dmap,\n\t\tKey:  key,\n\t}\n}\n\nfunc (g *GetEntry) SetReplica() *GetEntry {\n\tg.Replica = true\n\treturn g\n}\n\nfunc (g *GetEntry) Command(ctx context.Context) *redis.StringCmd {\n\tvar args []interface{}\n\targs = append(args, DMap.GetEntry)\n\targs = 
append(args, g.DMap)\n\targs = append(args, g.Key)\n\tif g.Replica {\n\t\targs = append(args, \"RC\")\n\t}\n\treturn redis.NewStringCmd(ctx, args...)\n}\n\nfunc ParseGetEntryCommand(cmd redcon.Command) (*GetEntry, error) {\n\tif len(cmd.Args) < 2 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\tg := NewGetEntry(\n\t\tutil.BytesToString(cmd.Args[1]), // DMap\n\t\tutil.BytesToString(cmd.Args[2]), // Key\n\t)\n\n\tif len(cmd.Args) == 4 {\n\t\targ := util.BytesToString(cmd.Args[3])\n\t\tif arg == \"RC\" {\n\t\t\tg.SetReplica()\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"%w: %s\", ErrInvalidArgument, arg)\n\t\t}\n\t}\n\n\treturn g, nil\n}\n\ntype Del struct {\n\tDMap string\n\tKeys []string\n}\n\nfunc NewDel(dmap string, keys ...string) *Del {\n\treturn &Del{\n\t\tDMap: dmap,\n\t\tKeys: keys,\n\t}\n}\n\nfunc (d *Del) Command(ctx context.Context) *redis.IntCmd {\n\tvar args []interface{}\n\targs = append(args, DMap.Del)\n\targs = append(args, d.DMap)\n\tfor _, key := range d.Keys {\n\t\targs = append(args, key)\n\t}\n\treturn redis.NewIntCmd(ctx, args...)\n}\n\nfunc ParseDelCommand(cmd redcon.Command) (*Del, error) {\n\tif len(cmd.Args) < 3 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\td := NewDel(\n\t\tutil.BytesToString(cmd.Args[1]),\n\t)\n\tfor _, key := range cmd.Args[2:] {\n\t\td.Keys = append(d.Keys, util.BytesToString(key))\n\t}\n\treturn d, nil\n}\n\ntype DelEntry struct {\n\tDel     *Del\n\tReplica bool\n}\n\nfunc NewDelEntry(dmap, key string) *DelEntry {\n\treturn &DelEntry{\n\t\tDel: NewDel(dmap, key),\n\t}\n}\n\nfunc (d *DelEntry) SetReplica() *DelEntry {\n\td.Replica = true\n\treturn d\n}\n\nfunc (d *DelEntry) Command(ctx context.Context) *redis.IntCmd {\n\tcmd := d.Del.Command(ctx)\n\targs := cmd.Args()\n\targs[0] = DMap.DelEntry\n\tif d.Replica {\n\t\targs = append(args, \"RC\")\n\t}\n\treturn redis.NewIntCmd(ctx, args...)\n}\n\nfunc ParseDelEntryCommand(cmd redcon.Command) (*DelEntry, error) {\n\tif len(cmd.Args) < 3 {\n\t\treturn nil, 
errWrongNumber(cmd.Args)\n\t}\n\n\td := NewDelEntry(\n\t\tutil.BytesToString(cmd.Args[1]),\n\t\tutil.BytesToString(cmd.Args[2]),\n\t)\n\n\tif len(cmd.Args) == 4 {\n\t\targ := util.BytesToString(cmd.Args[3])\n\t\tif arg == \"RC\" {\n\t\t\td.SetReplica()\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"%w: %s\", ErrInvalidArgument, arg)\n\t\t}\n\t}\n\n\treturn d, nil\n}\n\ntype PExpire struct {\n\tDMap         string\n\tKey          string\n\tMilliseconds time.Duration\n}\n\nfunc NewPExpire(dmap, key string, milliseconds time.Duration) *PExpire {\n\treturn &PExpire{\n\t\tDMap:         dmap,\n\t\tKey:          key,\n\t\tMilliseconds: milliseconds,\n\t}\n}\n\nfunc (p *PExpire) Command(ctx context.Context) *redis.StatusCmd {\n\tvar args []interface{}\n\targs = append(args, DMap.PExpire)\n\targs = append(args, p.DMap)\n\targs = append(args, p.Key)\n\targs = append(args, p.Milliseconds.Milliseconds())\n\treturn redis.NewStatusCmd(ctx, args...)\n}\n\nfunc ParsePExpireCommand(cmd redcon.Command) (*PExpire, error) {\n\tif len(cmd.Args) < 4 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\trawMilliseconds := util.BytesToString(cmd.Args[3])\n\tmilliseconds, err := strconv.ParseInt(rawMilliseconds, 10, 64)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tp := NewPExpire(\n\t\tutil.BytesToString(cmd.Args[1]), // DMap\n\t\tutil.BytesToString(cmd.Args[2]), // Key\n\t\ttime.Duration(milliseconds*int64(time.Millisecond)),\n\t)\n\treturn p, nil\n}\n\ntype Expire struct {\n\tDMap    string\n\tKey     string\n\tSeconds time.Duration\n}\n\nfunc NewExpire(dmap, key string, seconds time.Duration) *Expire {\n\treturn &Expire{\n\t\tDMap:    dmap,\n\t\tKey:     key,\n\t\tSeconds: seconds,\n\t}\n}\n\nfunc (e *Expire) Command(ctx context.Context) *redis.StatusCmd {\n\tvar args []interface{}\n\targs = append(args, DMap.Expire)\n\targs = append(args, e.DMap)\n\targs = append(args, e.Key)\n\targs = append(args, e.Seconds.Seconds())\n\treturn redis.NewStatusCmd(ctx, args...)\n}\n\nfunc 
ParseExpireCommand(cmd redcon.Command) (*Expire, error) {\n\tif len(cmd.Args) < 4 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\trawSeconds := util.BytesToString(cmd.Args[3])\n\tseconds, err := strconv.ParseFloat(rawSeconds, 64)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\te := NewExpire(\n\t\tutil.BytesToString(cmd.Args[1]), // DMap\n\t\tutil.BytesToString(cmd.Args[2]), // Key\n\t\ttime.Duration(seconds*float64(time.Second)),\n\t)\n\treturn e, nil\n}\n\ntype Destroy struct {\n\tDMap  string\n\tLocal bool\n}\n\nfunc NewDestroy(dmap string) *Destroy {\n\treturn &Destroy{\n\t\tDMap: dmap,\n\t}\n}\n\nfunc (d *Destroy) SetLocal() *Destroy {\n\td.Local = true\n\treturn d\n}\n\nfunc (d *Destroy) Command(ctx context.Context) *redis.StatusCmd {\n\tvar args []interface{}\n\targs = append(args, DMap.Destroy)\n\targs = append(args, d.DMap)\n\tif d.Local {\n\t\targs = append(args, \"LC\")\n\t}\n\treturn redis.NewStatusCmd(ctx, args...)\n}\n\nfunc ParseDestroyCommand(cmd redcon.Command) (*Destroy, error) {\n\tif len(cmd.Args) < 2 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\td := NewDestroy(\n\t\tutil.BytesToString(cmd.Args[1]),\n\t)\n\n\tif len(cmd.Args) == 3 {\n\t\targ := util.BytesToString(cmd.Args[2])\n\t\tif arg == \"LC\" {\n\t\t\td.SetLocal()\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"%w: %s\", ErrInvalidArgument, arg)\n\t\t}\n\t}\n\n\treturn d, nil\n}\n\ntype Scan struct {\n\tPartID  uint64\n\tDMap    string\n\tCursor  uint64\n\tCount   int\n\tMatch   string\n\tReplica bool\n}\n\nfunc NewScan(partID uint64, dmap string, cursor uint64) *Scan {\n\treturn &Scan{\n\t\tPartID: partID,\n\t\tDMap:   dmap,\n\t\tCursor: cursor,\n\t}\n}\n\nfunc (s *Scan) SetMatch(match string) *Scan {\n\ts.Match = match\n\treturn s\n}\n\nfunc (s *Scan) SetCount(count int) *Scan {\n\ts.Count = count\n\treturn s\n}\n\nfunc (s *Scan) SetReplica() *Scan {\n\ts.Replica = true\n\treturn s\n}\n\nfunc (s *Scan) Command(ctx context.Context) *redis.ScanCmd {\n\tvar args 
[]interface{}\n\targs = append(args, DMap.Scan)\n\targs = append(args, s.PartID)\n\targs = append(args, s.DMap)\n\targs = append(args, s.Cursor)\n\tif s.Match != \"\" {\n\t\targs = append(args, \"MATCH\")\n\t\targs = append(args, s.Match)\n\t}\n\tif s.Count != 0 {\n\t\targs = append(args, \"COUNT\")\n\t\targs = append(args, s.Count)\n\t}\n\tif s.Replica {\n\t\targs = append(args, \"RC\")\n\t}\n\treturn redis.NewScanCmd(ctx, nil, args...)\n}\n\nconst DefaultScanCount = 10\n\nfunc ParseScanCommand(cmd redcon.Command) (*Scan, error) {\n\tif len(cmd.Args) < 4 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\trawPartID := util.BytesToString(cmd.Args[1])\n\tpartID, err := strconv.ParseUint(rawPartID, 10, 64)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trawCursor := util.BytesToString(cmd.Args[3])\n\tcursor, err := strconv.ParseUint(rawCursor, 10, 64)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts := NewScan(\n\t\tpartID,\n\t\tutil.BytesToString(cmd.Args[2]), // DMap\n\t\tcursor,\n\t)\n\n\targs := cmd.Args[4:]\n\tfor len(args) > 0 {\n\t\tswitch arg := strings.ToUpper(util.BytesToString(args[0])); arg {\n\t\tcase \"MATCH\":\n\t\t\ts.SetMatch(util.BytesToString(args[1]))\n\t\t\targs = args[2:]\n\t\t\tcontinue\n\t\tcase \"COUNT\":\n\t\t\tcount, err := strconv.Atoi(util.BytesToString(args[1]))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ts.SetCount(count)\n\t\t\targs = args[2:]\n\t\t\tcontinue\n\t\tcase \"RC\":\n\t\t\ts.SetReplica()\n\t\t\targs = args[1:]\n\t\t}\n\t}\n\n\tif s.Count == 0 {\n\t\ts.SetCount(DefaultScanCount)\n\t}\n\n\treturn s, nil\n}\n\ntype Incr struct {\n\tDMap  string\n\tKey   string\n\tDelta int\n}\n\nfunc NewIncr(dmap, key string, delta int) *Incr {\n\treturn &Incr{\n\t\tDMap:  dmap,\n\t\tKey:   key,\n\t\tDelta: delta,\n\t}\n}\n\nfunc (i *Incr) Command(ctx context.Context) *redis.IntCmd {\n\tvar args []interface{}\n\targs = append(args, DMap.Incr)\n\targs = append(args, i.DMap)\n\targs = append(args, i.Key)\n\targs = 
append(args, i.Delta)\n\treturn redis.NewIntCmd(ctx, args...)\n}\n\nfunc ParseIncrCommand(cmd redcon.Command) (*Incr, error) {\n\tif len(cmd.Args) < 4 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\tdelta, err := strconv.Atoi(util.BytesToString(cmd.Args[3]))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewIncr(\n\t\tutil.BytesToString(cmd.Args[1]),\n\t\tutil.BytesToString(cmd.Args[2]),\n\t\tdelta,\n\t), nil\n}\n\ntype Decr struct {\n\t*Incr\n}\n\nfunc NewDecr(dmap, key string, delta int) *Decr {\n\treturn &Decr{\n\t\tNewIncr(dmap, key, delta),\n\t}\n}\n\nfunc (d *Decr) Command(ctx context.Context) *redis.IntCmd {\n\tcmd := d.Incr.Command(ctx)\n\tcmd.Args()[0] = DMap.Decr\n\treturn cmd\n}\n\nfunc ParseDecrCommand(cmd redcon.Command) (*Decr, error) {\n\tif len(cmd.Args) < 4 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\tdelta, err := strconv.Atoi(util.BytesToString(cmd.Args[3]))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewDecr(\n\t\tutil.BytesToString(cmd.Args[1]),\n\t\tutil.BytesToString(cmd.Args[2]),\n\t\tdelta,\n\t), nil\n}\n\ntype GetPut struct {\n\tDMap  string\n\tKey   string\n\tValue []byte\n\tRaw   bool\n}\n\nfunc NewGetPut(dmap, key string, value []byte) *GetPut {\n\treturn &GetPut{\n\t\tDMap:  dmap,\n\t\tKey:   key,\n\t\tValue: value,\n\t}\n}\n\nfunc (g *GetPut) SetRaw() *GetPut {\n\tg.Raw = true\n\treturn g\n}\n\nfunc (g *GetPut) Command(ctx context.Context) *redis.StringCmd {\n\tvar args []interface{}\n\targs = append(args, DMap.GetPut)\n\targs = append(args, g.DMap)\n\targs = append(args, g.Key)\n\targs = append(args, g.Value)\n\tif g.Raw {\n\t\targs = append(args, \"RW\")\n\t}\n\treturn redis.NewStringCmd(ctx, args...)\n}\n\nfunc ParseGetPutCommand(cmd redcon.Command) (*GetPut, error) {\n\tif len(cmd.Args) < 4 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\tg := NewGetPut(\n\t\tutil.BytesToString(cmd.Args[1]), // DMap\n\t\tutil.BytesToString(cmd.Args[2]), // Key\n\t\tcmd.Args[3],                     // 
Value\n\t)\n\n\tif len(cmd.Args) == 5 {\n\t\targ := util.BytesToString(cmd.Args[4])\n\t\tif arg == \"RW\" {\n\t\t\tg.SetRaw()\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"%w: %s\", ErrInvalidArgument, arg)\n\t\t}\n\t}\n\treturn g, nil\n}\n\ntype IncrByFloat struct {\n\tDMap  string\n\tKey   string\n\tDelta float64\n}\n\nfunc NewIncrByFloat(dmap, key string, delta float64) *IncrByFloat {\n\treturn &IncrByFloat{\n\t\tDMap:  dmap,\n\t\tKey:   key,\n\t\tDelta: delta,\n\t}\n}\n\nfunc (i *IncrByFloat) Command(ctx context.Context) *redis.FloatCmd {\n\tvar args []interface{}\n\targs = append(args, DMap.IncrByFloat)\n\targs = append(args, i.DMap)\n\targs = append(args, i.Key)\n\targs = append(args, i.Delta)\n\treturn redis.NewFloatCmd(ctx, args...)\n}\n\nfunc ParseIncrByFloatCommand(cmd redcon.Command) (*IncrByFloat, error) {\n\tif len(cmd.Args) < 4 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\tdelta, err := strconv.ParseFloat(util.BytesToString(cmd.Args[3]), 10)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewIncrByFloat(\n\t\tutil.BytesToString(cmd.Args[1]),\n\t\tutil.BytesToString(cmd.Args[2]),\n\t\tdelta,\n\t), nil\n}\n\ntype Lock struct {\n\tDMap     string\n\tKey      string\n\tDeadline float64\n\tEX       float64\n\tPX       int64\n}\n\nfunc NewLock(dmap, key string, deadline float64) *Lock {\n\treturn &Lock{\n\t\tDMap:     dmap,\n\t\tKey:      key,\n\t\tDeadline: deadline,\n\t}\n}\n\nfunc (l *Lock) SetEX(ex float64) *Lock {\n\tl.EX = ex\n\treturn l\n}\n\nfunc (l *Lock) SetPX(px int64) *Lock {\n\tl.PX = px\n\treturn l\n}\n\nfunc (l *Lock) Command(ctx context.Context) *redis.StringCmd {\n\tvar args []interface{}\n\targs = append(args, DMap.Lock)\n\targs = append(args, l.DMap)\n\targs = append(args, l.Key)\n\targs = append(args, l.Deadline)\n\n\t// Options\n\tif l.EX != 0 {\n\t\targs = append(args, \"EX\")\n\t\targs = append(args, l.EX)\n\t}\n\n\tif l.PX != 0 {\n\t\targs = append(args, \"PX\")\n\t\targs = append(args, l.PX)\n\t}\n\n\treturn 
redis.NewStringCmd(ctx, args...)\n}\n\nfunc ParseLockCommand(cmd redcon.Command) (*Lock, error) {\n\tif len(cmd.Args) < 4 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\tdeadline, err := strconv.ParseFloat(util.BytesToString(cmd.Args[3]), 64)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tl := NewLock(\n\t\tutil.BytesToString(cmd.Args[1]), // DMap\n\t\tutil.BytesToString(cmd.Args[2]), // Key\n\t\tdeadline,                        // Deadline\n\t)\n\n\t// EX or PX are optional.\n\tif len(cmd.Args) > 4 {\n\t\tif len(cmd.Args) == 5 {\n\t\t\treturn nil, fmt.Errorf(\"%w: %s needs a numerical argument\", ErrInvalidArgument, util.BytesToString(cmd.Args[4]))\n\t\t}\n\n\t\tswitch arg := strings.ToUpper(util.BytesToString(cmd.Args[4])); arg {\n\t\tcase \"PX\":\n\t\t\tpx, err := strconv.ParseInt(util.BytesToString(cmd.Args[5]), 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tl.PX = px\n\t\tcase \"EX\":\n\t\t\tex, err := strconv.ParseFloat(util.BytesToString(cmd.Args[5]), 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tl.EX = ex\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"%w: %s\", ErrInvalidArgument, arg)\n\t\t}\n\t}\n\n\treturn l, nil\n}\n\ntype Unlock struct {\n\tDMap  string\n\tKey   string\n\tToken string\n}\n\nfunc NewUnlock(dmap, key, token string) *Unlock {\n\treturn &Unlock{\n\t\tDMap:  dmap,\n\t\tKey:   key,\n\t\tToken: token,\n\t}\n}\n\nfunc (u *Unlock) Command(ctx context.Context) *redis.StatusCmd {\n\tvar args []interface{}\n\targs = append(args, DMap.Unlock)\n\targs = append(args, u.DMap)\n\targs = append(args, u.Key)\n\targs = append(args, u.Token)\n\treturn redis.NewStatusCmd(ctx, args...)\n}\n\nfunc ParseUnlockCommand(cmd redcon.Command) (*Unlock, error) {\n\tif len(cmd.Args) < 4 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\treturn NewUnlock(\n\t\tutil.BytesToString(cmd.Args[1]), // DMap\n\t\tutil.BytesToString(cmd.Args[2]), // Key\n\t\tutil.BytesToString(cmd.Args[3]), // Token\n\t), nil\n}\n\ntype 
LockLease struct {\n\tDMap    string\n\tKey     string\n\tToken   string\n\tTimeout float64\n}\n\nfunc NewLockLease(dmap, key, token string, timeout float64) *LockLease {\n\treturn &LockLease{\n\t\tDMap:    dmap,\n\t\tKey:     key,\n\t\tToken:   token,\n\t\tTimeout: timeout,\n\t}\n}\n\nfunc (l *LockLease) Command(ctx context.Context) *redis.StatusCmd {\n\tvar args []interface{}\n\targs = append(args, DMap.LockLease)\n\targs = append(args, l.DMap)\n\targs = append(args, l.Key)\n\targs = append(args, l.Token)\n\targs = append(args, l.Timeout)\n\treturn redis.NewStatusCmd(ctx, args...)\n}\n\nfunc ParseLockLeaseCommand(cmd redcon.Command) (*LockLease, error) {\n\tif len(cmd.Args) < 5 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\ttimeout, err := strconv.ParseFloat(util.BytesToString(cmd.Args[4]), 64)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewLockLease(\n\t\tutil.BytesToString(cmd.Args[1]), // DMap\n\t\tutil.BytesToString(cmd.Args[2]), // Key\n\t\tutil.BytesToString(cmd.Args[3]), // Token\n\t\ttimeout,                         // Timeout\n\t), nil\n}\n\ntype PLockLease struct {\n\tDMap    string\n\tKey     string\n\tToken   string\n\tTimeout int64\n}\n\nfunc NewPLockLease(dmap, key, token string, timeout int64) *PLockLease {\n\treturn &PLockLease{\n\t\tDMap:    dmap,\n\t\tKey:     key,\n\t\tToken:   token,\n\t\tTimeout: timeout,\n\t}\n}\n\nfunc (p *PLockLease) Command(ctx context.Context) *redis.StatusCmd {\n\tvar args []interface{}\n\targs = append(args, DMap.PLockLease)\n\targs = append(args, p.DMap)\n\targs = append(args, p.Key)\n\targs = append(args, p.Token)\n\targs = append(args, p.Timeout)\n\treturn redis.NewStatusCmd(ctx, args...)\n}\n\nfunc ParsePLockLeaseCommand(cmd redcon.Command) (*PLockLease, error) {\n\tif len(cmd.Args) < 5 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\ttimeout, err := strconv.ParseInt(util.BytesToString(cmd.Args[4]), 10, 64)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn 
NewPLockLease(\n\t\tutil.BytesToString(cmd.Args[1]), // DMap\n\t\tutil.BytesToString(cmd.Args[2]), // Key\n\t\tutil.BytesToString(cmd.Args[3]), // Token\n\t\ttimeout,                         // Timeout\n\t), nil\n}\n"
  },
  {
    "path": "internal/protocol/dmap_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage protocol\n\nimport (\n\t\"context\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/tidwall/redcon\"\n)\n\nfunc stringToCommand(s string) redcon.Command {\n\tcmd := redcon.Command{\n\t\tRaw: []byte(s),\n\t}\n\n\ts = strings.TrimSuffix(s, \": []\")\n\ts = strings.TrimSuffix(s, \": 0\")\n\ts = strings.TrimSuffix(s, \":\")\n\ts = strings.TrimSuffix(s, \": \")\n\tparsed := strings.Split(s, \" \")\n\tfor _, arg := range parsed {\n\t\tcmd.Args = append(cmd.Args, []byte(arg))\n\t}\n\treturn cmd\n}\n\nfunc TestProtocol_ParsePutCommand_EX(t *testing.T) {\n\tputCmd := NewPut(\"my-dmap\", \"my-key\", []byte(\"my-value\"))\n\tputCmd.SetEX((10 * time.Second).Seconds())\n\n\tcmd := stringToCommand(putCmd.Command(context.Background()).String())\n\tparsed, err := ParsePutCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, \"my-key\", parsed.Key)\n\trequire.Equal(t, []byte(\"my-value\"), parsed.Value)\n\trequire.Equal(t, float64(10), parsed.EX)\n}\n\nfunc TestProtocol_ParsePutCommand_PX(t *testing.T) {\n\tputCmd := NewPut(\"my-dmap\", \"my-key\", []byte(\"my-value\"))\n\tputCmd.SetPX((100 * time.Millisecond).Milliseconds())\n\n\tcmd := stringToCommand(putCmd.Command(context.Background()).String())\n\tparsed, err := 
ParsePutCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, \"my-key\", parsed.Key)\n\trequire.Equal(t, []byte(\"my-value\"), parsed.Value)\n\trequire.Equal(t, int64(100), parsed.PX)\n}\n\nfunc TestProtocol_ParsePutCommand_NX(t *testing.T) {\n\tputCmd := NewPut(\"my-dmap\", \"my-key\", []byte(\"my-value\"))\n\tputCmd.SetNX()\n\n\tcmd := stringToCommand(putCmd.Command(context.Background()).String())\n\tparsed, err := ParsePutCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, \"my-key\", parsed.Key)\n\trequire.Equal(t, []byte(\"my-value\"), parsed.Value)\n\trequire.True(t, parsed.NX)\n\trequire.False(t, parsed.XX)\n}\n\nfunc TestProtocol_ParsePutCommand_XX(t *testing.T) {\n\tputCmd := NewPut(\"my-dmap\", \"my-key\", []byte(\"my-value\"))\n\tputCmd.SetXX()\n\n\tcmd := stringToCommand(putCmd.Command(context.Background()).String())\n\tparsed, err := ParsePutCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, \"my-key\", parsed.Key)\n\trequire.Equal(t, []byte(\"my-value\"), parsed.Value)\n\trequire.True(t, parsed.XX)\n\trequire.False(t, parsed.NX)\n}\n\nfunc TestProtocol_ParsePutCommand_EXAT(t *testing.T) {\n\tputCmd := NewPut(\"my-dmap\", \"my-key\", []byte(\"my-value\"))\n\texat := float64(time.Now().Unix()) + 10\n\tputCmd.SetEXAT(exat)\n\n\tcmd := stringToCommand(putCmd.Command(context.Background()).String())\n\tparsed, err := ParsePutCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, \"my-key\", parsed.Key)\n\trequire.Equal(t, []byte(\"my-value\"), parsed.Value)\n\trequire.Equal(t, exat, parsed.EXAT)\n}\n\nfunc TestProtocol_ParsePutCommand_PXAT(t *testing.T) {\n\tputCmd := NewPut(\"my-dmap\", \"my-key\", []byte(\"my-value\"))\n\tpxat := (time.Now().UnixNano() / 1000000) + 10\n\tputCmd.SetPXAT(pxat)\n\n\tcmd := 
stringToCommand(putCmd.Command(context.Background()).String())\n\tparsed, err := ParsePutCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, \"my-key\", parsed.Key)\n\trequire.Equal(t, []byte(\"my-value\"), parsed.Value)\n\trequire.Equal(t, pxat, parsed.PXAT)\n}\n\nfunc TestProtocol_ParseScanCommand(t *testing.T) {\n\tscanCmd := NewScan(1, \"my-dmap\", 0)\n\n\ts := scanCmd.Command(context.Background()).String()\n\ts = strings.TrimSuffix(s, \": []\")\n\tcmd := stringToCommand(s)\n\tparsed, err := ParseScanCommand(cmd)\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, \"\", parsed.Match)\n\trequire.Equal(t, 10, parsed.Count)\n\trequire.False(t, scanCmd.Replica)\n}\n\nfunc TestProtocol_ParseScanCommand_Replica(t *testing.T) {\n\tscanCmd := NewScan(1, \"my-dmap\", 0).SetReplica()\n\n\ts := scanCmd.Command(context.Background()).String()\n\ts = strings.TrimSuffix(s, \": []\")\n\tcmd := stringToCommand(s)\n\tparsed, err := ParseScanCommand(cmd)\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, \"\", parsed.Match)\n\trequire.Equal(t, 10, parsed.Count)\n\trequire.True(t, scanCmd.Replica)\n}\n\nfunc TestProtocol_ParseScanCommand_Match(t *testing.T) {\n\tscanCmd := NewScan(1, \"my-dmap\", 0).SetMatch(\"^even\")\n\n\ts := scanCmd.Command(context.Background()).String()\n\ts = strings.TrimSuffix(s, \": []\")\n\tcmd := stringToCommand(s)\n\tparsed, err := ParseScanCommand(cmd)\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, uint64(1), parsed.PartID)\n\trequire.Equal(t, \"^even\", parsed.Match)\n\trequire.Equal(t, 10, parsed.Count)\n\trequire.False(t, scanCmd.Replica)\n}\n\nfunc TestProtocol_ParseScanCommand_PartID(t *testing.T) {\n\tscanCmd := NewScan(1, \"my-dmap\", 0).SetCount(200)\n\n\ts := scanCmd.Command(context.Background()).String()\n\ts = strings.TrimSuffix(s, \": []\")\n\tcmd := 
stringToCommand(s)\n\tparsed, err := ParseScanCommand(cmd)\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, uint64(1), parsed.PartID)\n\trequire.Equal(t, \"\", parsed.Match)\n\trequire.Equal(t, 200, parsed.Count)\n\trequire.False(t, scanCmd.Replica)\n}\n\nfunc TestProtocol_ParseScanCommand_Match_Count(t *testing.T) {\n\tscanCmd := NewScan(1, \"my-dmap\", 0).SetCount(100).SetMatch(\"^even\")\n\n\ts := scanCmd.Command(context.Background()).String()\n\ts = strings.TrimSuffix(s, \": []\")\n\tcmd := stringToCommand(s)\n\tparsed, err := ParseScanCommand(cmd)\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, uint64(1), parsed.PartID)\n\trequire.Equal(t, \"^even\", parsed.Match)\n\trequire.Equal(t, 100, parsed.Count)\n\trequire.False(t, scanCmd.Replica)\n}\n\nfunc TestProtocol_ParseScanCommand_Match_Count_Replica(t *testing.T) {\n\tscanCmd := NewScan(1, \"my-dmap\", 0).\n\t\tSetCount(100).\n\t\tSetMatch(\"^even\").\n\t\tSetReplica()\n\n\ts := scanCmd.Command(context.Background()).String()\n\ts = strings.TrimSuffix(s, \": []\")\n\tcmd := stringToCommand(s)\n\tparsed, err := ParseScanCommand(cmd)\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, uint64(1), parsed.PartID)\n\trequire.Equal(t, \"^even\", parsed.Match)\n\trequire.Equal(t, 100, parsed.Count)\n\trequire.True(t, scanCmd.Replica)\n}\n\nfunc TestProtocol_PutEntry(t *testing.T) {\n\tputEntryCmd := NewPutEntry(\"my-dmap\", \"my-key\", []byte(\"my-value\"))\n\n\tcmd := stringToCommand(putEntryCmd.Command(context.Background()).String())\n\tparsed, err := ParsePutEntryCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, \"my-key\", parsed.Key)\n\trequire.Equal(t, []byte(\"my-value\"), parsed.Value)\n}\n\nfunc TestProtocol_Get(t *testing.T) {\n\tgetCmd := NewGet(\"my-dmap\", \"my-key\")\n\n\tcmd := 
stringToCommand(getCmd.Command(context.Background()).String())\n\tparsed, err := ParseGetCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, \"my-key\", parsed.Key)\n\trequire.False(t, parsed.Raw)\n}\n\nfunc TestProtocol_Get_RW(t *testing.T) {\n\tgetCmd := NewGet(\"my-dmap\", \"my-key\")\n\tgetCmd.SetRaw()\n\n\tcmd := stringToCommand(getCmd.Command(context.Background()).String())\n\tparsed, err := ParseGetCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, \"my-key\", parsed.Key)\n\trequire.True(t, parsed.Raw)\n}\n\nfunc TestProtocol_GetEntry(t *testing.T) {\n\tgetEntryCmd := NewGetEntry(\"my-dmap\", \"my-key\")\n\n\tcmd := stringToCommand(getEntryCmd.Command(context.Background()).String())\n\tparsed, err := ParseGetEntryCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, \"my-key\", parsed.Key)\n\trequire.False(t, parsed.Replica)\n}\n\nfunc TestProtocol_GetEntry_RC(t *testing.T) {\n\tgetEntryCmd := NewGetEntry(\"my-dmap\", \"my-key\")\n\tgetEntryCmd.SetReplica()\n\n\tcmd := stringToCommand(getEntryCmd.Command(context.Background()).String())\n\tparsed, err := ParseGetEntryCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, \"my-key\", parsed.Key)\n\trequire.True(t, parsed.Replica)\n}\n\nfunc TestProtocol_Del(t *testing.T) {\n\tdelCmd := NewDel(\"my-dmap\", \"key1\", \"key2\")\n\n\tcmd := stringToCommand(delCmd.Command(context.Background()).String())\n\tparsed, err := ParseDelCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, []string{\"key1\", \"key2\"}, parsed.Keys)\n}\n\nfunc TestProtocol_DelEntry(t *testing.T) {\n\tdelEntryCmd := NewDelEntry(\"my-dmap\", \"my-key\")\n\n\tcmd := stringToCommand(delEntryCmd.Command(context.Background()).String())\n\tparsed, err := 
ParseDelEntryCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.Del.DMap)\n\trequire.Equal(t, []string{\"my-key\"}, parsed.Del.Keys)\n\trequire.False(t, parsed.Replica)\n}\n\nfunc TestProtocol_DelEntry_RC(t *testing.T) {\n\tdelEntryCmd := NewDelEntry(\"my-dmap\", \"my-key\")\n\tdelEntryCmd.SetReplica()\n\n\tcmd := stringToCommand(delEntryCmd.Command(context.Background()).String())\n\tparsed, err := ParseDelEntryCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.Del.DMap)\n\trequire.Equal(t, []string{\"my-key\"}, parsed.Del.Keys)\n\trequire.True(t, parsed.Replica)\n}\n\nfunc TestProtocol_PExpire(t *testing.T) {\n\tpexpireCmd := NewPExpire(\"my-dmap\", \"my-key\", 10*time.Millisecond)\n\n\tcmd := stringToCommand(pexpireCmd.Command(context.Background()).String())\n\tparsed, err := ParsePExpireCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, \"my-key\", parsed.Key)\n\trequire.Equal(t, 10*time.Millisecond, parsed.Milliseconds)\n}\n\nfunc TestProtocol_Expire(t *testing.T) {\n\tpexpireCmd := NewExpire(\"my-dmap\", \"my-key\", 10*time.Second)\n\n\tcmd := stringToCommand(pexpireCmd.Command(context.Background()).String())\n\tparsed, err := ParseExpireCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, \"my-key\", parsed.Key)\n\trequire.Equal(t, 10*time.Second, parsed.Seconds)\n}\n\nfunc TestProtocol_Destroy(t *testing.T) {\n\tdestroyCmd := NewDestroy(\"my-dmap\")\n\n\tcmd := stringToCommand(destroyCmd.Command(context.Background()).String())\n\tparsed, err := ParseDestroyCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.False(t, parsed.Local)\n}\n\nfunc TestProtocol_Destroy_Local(t *testing.T) {\n\tdestroyCmd := NewDestroy(\"my-dmap\")\n\tdestroyCmd.SetLocal()\n\n\tcmd := stringToCommand(destroyCmd.Command(context.Background()).String())\n\tparsed, err 
:= ParseDestroyCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.True(t, parsed.Local)\n}\n\nfunc TestProtocol_Incr(t *testing.T) {\n\tincrCmd := NewIncr(\"my-dmap\", \"my-key\", 7)\n\n\tcmd := stringToCommand(incrCmd.Command(context.Background()).String())\n\tparsed, err := ParseIncrCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, \"my-key\", parsed.Key)\n\trequire.Equal(t, 7, parsed.Delta)\n}\n\nfunc TestProtocol_Decr(t *testing.T) {\n\tdecrCmd := NewDecr(\"my-dmap\", \"my-key\", 7)\n\n\tcmd := stringToCommand(decrCmd.Command(context.Background()).String())\n\tparsed, err := ParseDecrCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, \"my-key\", parsed.Key)\n\trequire.Equal(t, 7, parsed.Delta)\n}\n\nfunc TestProtocol_GetPut(t *testing.T) {\n\tgetputCmd := NewGetPut(\"my-dmap\", \"my-key\", []byte(\"my-value\"))\n\n\tcmd := stringToCommand(getputCmd.Command(context.Background()).String())\n\tparsed, err := ParseGetPutCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, \"my-key\", parsed.Key)\n\trequire.Equal(t, []byte(\"my-value\"), parsed.Value)\n\trequire.False(t, parsed.Raw)\n}\n\nfunc TestProtocol_GetPut_RW(t *testing.T) {\n\tgetputCmd := NewGetPut(\"my-dmap\", \"my-key\", []byte(\"my-value\"))\n\tgetputCmd.SetRaw()\n\n\tcmd := stringToCommand(getputCmd.Command(context.Background()).String())\n\tparsed, err := ParseGetPutCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, \"my-key\", parsed.Key)\n\trequire.Equal(t, []byte(\"my-value\"), parsed.Value)\n\trequire.True(t, parsed.Raw)\n}\n\nfunc TestProtocol_IncrByFloat(t *testing.T) {\n\tincrByFloatCmd := NewIncrByFloat(\"my-dmap\", \"my-key\", 3.14159265359)\n\n\tcmd := 
stringToCommand(incrByFloatCmd.Command(context.Background()).String())\n\tparsed, err := ParseIncrByFloatCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, \"my-key\", parsed.Key)\n\trequire.Equal(t, 3.14159265359, parsed.Delta)\n}\n\nfunc TestProtocol_Lock(t *testing.T) {\n\tlockCmd := NewLock(\"my-dmap\", \"my-key\", 7)\n\n\tcmd := stringToCommand(lockCmd.Command(context.Background()).String())\n\tparsed, err := ParseLockCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, \"my-key\", parsed.Key)\n\trequire.Equal(t, float64(7), parsed.Deadline)\n}\n\nfunc TestProtocol_Lock_EX(t *testing.T) {\n\texDuration := (250 * time.Second).Seconds()\n\tlockCmd := NewLock(\"my-dmap\", \"my-key\", 7)\n\tlockCmd.SetEX(exDuration)\n\n\tcmd := stringToCommand(lockCmd.Command(context.Background()).String())\n\tparsed, err := ParseLockCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, \"my-key\", parsed.Key)\n\trequire.Equal(t, float64(7), parsed.Deadline)\n\trequire.Equal(t, exDuration, parsed.EX)\n}\n\nfunc TestProtocol_Lock_PX(t *testing.T) {\n\tpxDuration := (250 * time.Millisecond).Milliseconds()\n\tlockCmd := NewLock(\"my-dmap\", \"my-key\", 7)\n\tlockCmd.SetPX(pxDuration)\n\n\tcmd := stringToCommand(lockCmd.Command(context.Background()).String())\n\tparsed, err := ParseLockCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, \"my-key\", parsed.Key)\n\trequire.Equal(t, float64(7), parsed.Deadline)\n\trequire.Equal(t, pxDuration, parsed.PX)\n}\n\nfunc TestProtocol_Unlock(t *testing.T) {\n\tunlockCmd := NewUnlock(\"my-dmap\", \"my-key\", \"token\")\n\n\tcmd := stringToCommand(unlockCmd.Command(context.Background()).String())\n\tparsed, err := ParseUnlockCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", 
parsed.DMap)\n\trequire.Equal(t, \"my-key\", parsed.Key)\n\trequire.Equal(t, \"token\", parsed.Token)\n}\n\nfunc TestProtocol_LockLease(t *testing.T) {\n\ttimeout := (7 * time.Second).Seconds()\n\tunlockCmd := NewLockLease(\"my-dmap\", \"my-key\", \"token\", timeout)\n\n\tcmd := stringToCommand(unlockCmd.Command(context.Background()).String())\n\tparsed, err := ParseLockLeaseCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, \"my-key\", parsed.Key)\n\trequire.Equal(t, \"token\", parsed.Token)\n\trequire.Equal(t, timeout, parsed.Timeout)\n}\n\nfunc TestProtocol_PLockLease(t *testing.T) {\n\ttimeout := (250 * time.Millisecond).Milliseconds()\n\tplockleaseCmd := NewPLockLease(\"my-dmap\", \"my-key\", \"token\", timeout)\n\n\tcmd := stringToCommand(plockleaseCmd.Command(context.Background()).String())\n\tparsed, err := ParsePLockLeaseCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, \"my-key\", parsed.Key)\n\trequire.Equal(t, \"token\", parsed.Token)\n\trequire.Equal(t, timeout, parsed.Timeout)\n}\n\nfunc TestProtocol_Scan(t *testing.T) {\n\tscanCmd := NewScan(17, \"my-dmap\", 234)\n\n\tcmd := stringToCommand(scanCmd.Command(context.Background()).String())\n\tparsed, err := ParseScanCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", parsed.DMap)\n\trequire.Equal(t, uint64(17), parsed.PartID)\n\trequire.Equal(t, uint64(234), parsed.Cursor)\n\trequire.False(t, parsed.Replica)\n\trequire.Equal(t, DefaultScanCount, parsed.Count)\n\trequire.Equal(t, \"\", parsed.Match)\n}\n\nfunc TestProtocol_Scan_Count_Match_Replica(t *testing.T) {\n\tscanCmd := NewScan(17, \"my-dmap\", 234)\n\tscanCmd.SetCount(123)\n\tscanCmd.SetMatch(\"^even:\")\n\tscanCmd.SetReplica()\n\n\tcmd := stringToCommand(scanCmd.Command(context.Background()).String())\n\tparsed, err := ParseScanCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-dmap\", 
parsed.DMap)\n\trequire.Equal(t, uint64(17), parsed.PartID)\n\trequire.Equal(t, uint64(234), parsed.Cursor)\n\trequire.True(t, parsed.Replica)\n\trequire.Equal(t, 123, parsed.Count)\n\trequire.Equal(t, \"^even:\", parsed.Match)\n}\n"
  },
  {
    "path": "internal/protocol/errors.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage protocol\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com/tidwall/redcon\"\n)\n\nvar ErrInvalidArgument = errors.New(\"invalid argument\")\n\nvar GenericError = \"ERR\"\n\nvar errorWithPrefix = struct {\n\tmtx    sync.RWMutex\n\tprefix map[string]error\n\terr    map[error]string\n}{\n\tprefix: make(map[string]error),\n\terr:    make(map[error]string),\n}\n\nfunc init() {\n\tSetError(\"INVALIDARGUMENT\", ErrInvalidArgument)\n}\n\nfunc SetError(prefix string, err error) {\n\terrorWithPrefix.mtx.Lock()\n\tdefer errorWithPrefix.mtx.Unlock()\n\n\te, ok := errorWithPrefix.prefix[prefix]\n\tif ok && e != err {\n\t\tpanic(fmt.Sprintf(\"prefix collision: %s: %v != %v\", prefix, err, e))\n\t}\n\terrorWithPrefix.err[err] = prefix\n\terrorWithPrefix.prefix[prefix] = err\n}\n\nfunc GetError(prefix string) error {\n\terrorWithPrefix.mtx.RLock()\n\tdefer errorWithPrefix.mtx.RUnlock()\n\n\treturn errorWithPrefix.prefix[prefix]\n}\n\nfunc getPrefix(err error) string {\n\tprefix, ok := errorWithPrefix.err[err]\n\tif !ok {\n\t\treturn GenericError\n\t}\n\treturn prefix\n}\n\nfunc GetPrefix(err error) string {\n\terrorWithPrefix.mtx.RLock()\n\tdefer errorWithPrefix.mtx.RUnlock()\n\n\tprefix := getPrefix(err)\n\tif prefix == GenericError {\n\t\treturn getPrefix(errors.Unwrap(err))\n\t}\n\treturn prefix\n}\n\nfunc 
ConvertError(err error) error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tparsed := strings.SplitN(err.Error(), \" \", 2)\n\tif perr := GetError(parsed[0]); perr != nil {\n\t\treturn perr\n\t}\n\n\tif len(parsed) > 1 {\n\t\treturn fmt.Errorf(\"%s\", parsed[1])\n\t}\n\n\treturn err\n}\n\nfunc WriteError(conn redcon.Conn, err error) {\n\tprefix := GetPrefix(err)\n\tconn.WriteError(fmt.Sprintf(\"%s %s\", prefix, err.Error()))\n}\n\nfunc errWrongNumber(args [][]byte) error {\n\tsb := strings.Builder{}\n\tfor {\n\t\targ := args[0]\n\t\tsb.Write(arg)\n\t\targs = args[1:]\n\t\tif len(args) == 0 {\n\t\t\tbreak\n\t\t}\n\t\tsb.WriteByte(0x20)\n\t}\n\treturn fmt.Errorf(\"wrong number of arguments for '%s' command\", strings.ToLower(sb.String()))\n}\n"
  },
  {
    "path": "internal/protocol/errors_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage protocol\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nvar errSomethingWentWrong = errors.New(\"something went wrong\")\n\nfunc TestProtocol_errWrongNumber(t *testing.T) {\n\tgetCmd := NewGet(\"my-dmap\", \"my-key\").Command(context.Background())\n\tcmd := stringToCommand(getCmd.String())\n\n\terr := errWrongNumber(cmd.Args)\n\trequire.Equal(t, \"wrong number of arguments for 'dm.get my-dmap my-key' command\", err.Error())\n}\n\nfunc TestProtocol_GetPrefix(t *testing.T) {\n\tSetError(\"WRONG\", errSomethingWentWrong)\n\tprefix := GetPrefix(errSomethingWentWrong)\n\trequire.Equal(t, \"WRONG\", prefix)\n}\n\nfunc TestProtocol_GetError(t *testing.T) {\n\tSetError(\"WRONG\", errSomethingWentWrong)\n\terr := GetError(\"WRONG\")\n\trequire.ErrorIs(t, err, errSomethingWentWrong)\n}\n\nfunc TestProtocol_ConvertError(t *testing.T) {\n\tSetError(\"WRONG\", errSomethingWentWrong)\n\terr := fmt.Errorf(\"WRONG %s\", errSomethingWentWrong.Error())\n\tcerr := ConvertError(err)\n\trequire.ErrorIs(t, cerr, errSomethingWentWrong)\n}\n"
  },
  {
    "path": "internal/protocol/pubsub.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage protocol\n\nimport (\n\t\"context\"\n\n\t\"github.com/olric-data/olric/internal/util\"\n\t\"github.com/redis/go-redis/v9\"\n\t\"github.com/tidwall/redcon\"\n)\n\ntype Publish struct {\n\tChannel string\n\tMessage string\n}\n\nfunc NewPublish(channel, message string) *Publish {\n\treturn &Publish{\n\t\tChannel: channel,\n\t\tMessage: message,\n\t}\n}\n\nfunc (p *Publish) Command(ctx context.Context) *redis.IntCmd {\n\tvar args []interface{}\n\targs = append(args, PubSub.Publish)\n\targs = append(args, p.Channel)\n\targs = append(args, p.Message)\n\treturn redis.NewIntCmd(ctx, args...)\n}\n\nfunc ParsePublishCommand(cmd redcon.Command) (*Publish, error) {\n\tif len(cmd.Args) < 3 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\treturn NewPublish(\n\t\tutil.BytesToString(cmd.Args[1]), // Channel\n\t\tutil.BytesToString(cmd.Args[2]), // Message\n\t), nil\n}\n\ntype PublishInternal struct {\n\tChannel string\n\tMessage string\n}\n\nfunc NewPublishInternal(channel, message string) *PublishInternal {\n\treturn &PublishInternal{\n\t\tChannel: channel,\n\t\tMessage: message,\n\t}\n}\n\nfunc (p *PublishInternal) Command(ctx context.Context) *redis.IntCmd {\n\tvar args []interface{}\n\targs = append(args, PubSub.PublishInternal)\n\targs = append(args, p.Channel)\n\targs = append(args, p.Message)\n\treturn redis.NewIntCmd(ctx, 
args...)\n}\n\nfunc ParsePublishInternalCommand(cmd redcon.Command) (*PublishInternal, error) {\n\tif len(cmd.Args) < 3 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\treturn NewPublishInternal(\n\t\tutil.BytesToString(cmd.Args[1]), // Channel\n\t\tutil.BytesToString(cmd.Args[2]), // Message\n\t), nil\n}\n\ntype Subscribe struct {\n\tChannels []string\n}\n\nfunc NewSubscribe(channels ...string) *Subscribe {\n\treturn &Subscribe{\n\t\tChannels: channels,\n\t}\n}\n\nfunc (s *Subscribe) Command(ctx context.Context) *redis.SliceCmd {\n\tvar args []interface{}\n\targs = append(args, PubSub.Subscribe)\n\tfor _, channel := range s.Channels {\n\t\targs = append(args, channel)\n\t}\n\treturn redis.NewSliceCmd(ctx, args...)\n}\n\nfunc ParseSubscribeCommand(cmd redcon.Command) (*Subscribe, error) {\n\tif len(cmd.Args) < 2 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\tvar channels []string\n\targs := cmd.Args[1:]\n\tfor len(args) > 0 {\n\t\targ := util.BytesToString(args[0])\n\t\tchannels = append(channels, arg)\n\t\targs = args[1:]\n\t}\n\treturn NewSubscribe(channels...), nil\n}\n\ntype PSubscribe struct {\n\tPatterns []string\n}\n\nfunc NewPSubscribe(patterns ...string) *PSubscribe {\n\treturn &PSubscribe{\n\t\tPatterns: patterns,\n\t}\n}\n\nfunc (s *PSubscribe) Command(ctx context.Context) *redis.SliceCmd {\n\tvar args []interface{}\n\targs = append(args, PubSub.Subscribe)\n\tfor _, channel := range s.Patterns {\n\t\targs = append(args, channel)\n\t}\n\treturn redis.NewSliceCmd(ctx, args...)\n}\n\nfunc ParsePSubscribeCommand(cmd redcon.Command) (*PSubscribe, error) {\n\tif len(cmd.Args) < 2 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\tvar patterns []string\n\targs := cmd.Args[1:]\n\tfor len(args) > 0 {\n\t\targ := util.BytesToString(args[0])\n\t\tpatterns = append(patterns, arg)\n\t\targs = args[1:]\n\t}\n\treturn NewPSubscribe(patterns...), nil\n}\n\ntype PubSubChannels struct {\n\tPattern string\n}\n\nfunc NewPubSubChannels() *PubSubChannels 
{\n\treturn &PubSubChannels{}\n}\n\nfunc (ps *PubSubChannels) SetPattern(pattern string) *PubSubChannels {\n\tps.Pattern = pattern\n\treturn ps\n}\n\nfunc (ps *PubSubChannels) Command(ctx context.Context) *redis.SliceCmd {\n\tvar args []interface{}\n\targs = append(args, PubSub.PubSubChannels)\n\tif ps.Pattern != \"\" {\n\t\targs = append(args, ps.Pattern)\n\t}\n\treturn redis.NewSliceCmd(ctx, args...)\n}\n\nfunc ParsePubSubChannelsCommand(cmd redcon.Command) (*PubSubChannels, error) {\n\tif len(cmd.Args) < 2 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\tps := NewPubSubChannels()\n\tif len(cmd.Args) >= 3 {\n\t\tps.SetPattern(util.BytesToString(cmd.Args[2]))\n\t}\n\treturn ps, nil\n}\n\ntype PubSubNumpat struct{}\n\nfunc NewPubSubNumpat() *PubSubNumpat {\n\treturn &PubSubNumpat{}\n}\n\nfunc (ps *PubSubNumpat) Command(ctx context.Context) *redis.IntCmd {\n\tvar args []interface{}\n\targs = append(args, PubSub.PubSubNumpat)\n\treturn redis.NewIntCmd(ctx, args...)\n}\n\nfunc ParsePubSubNumpatCommand(cmd redcon.Command) (*PubSubNumpat, error) {\n\tif len(cmd.Args) < 2 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\treturn NewPubSubNumpat(), nil\n}\n\ntype PubSubNumsub struct {\n\tChannels []string\n}\n\nfunc NewPubSubNumsub(channels ...string) *PubSubNumsub {\n\treturn &PubSubNumsub{\n\t\tChannels: channels,\n\t}\n}\n\nfunc (ps *PubSubNumsub) Command(ctx context.Context) *redis.SliceCmd {\n\tvar args []interface{}\n\targs = append(args, PubSub.PubSubNumsub)\n\tfor _, channel := range ps.Channels {\n\t\targs = append(args, channel)\n\t}\n\treturn redis.NewSliceCmd(ctx, args...)\n}\n\nfunc ParsePubSubNumsubCommand(cmd redcon.Command) (*PubSubNumsub, error) {\n\tif len(cmd.Args) < 2 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\tvar channels []string\n\targs := cmd.Args[2:]\n\tfor len(args) > 0 {\n\t\targ := util.BytesToString(args[0])\n\t\tchannels = append(channels, arg)\n\t\targs = args[1:]\n\t}\n\treturn NewPubSubNumsub(channels...), nil\n}\n"
  },
  {
    "path": "internal/protocol/pubsub_test.go",
    "content": "package protocol\n\nimport (\n\t\"context\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestProtocol_ParsePublishCommand(t *testing.T) {\n\tpublishCmd := NewPublish(\"my-pubsub\", \"my-message\")\n\n\tcmd := stringToCommand(publishCmd.Command(context.Background()).String())\n\tparsed, err := ParsePublishCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-pubsub\", parsed.Channel)\n\trequire.Equal(t, \"my-message\", parsed.Message)\n}\n\nfunc TestProtocol_ParsePublishInternalCommand(t *testing.T) {\n\tpublishIntCmd := NewPublishInternal(\"my-pubsub\", \"my-message\")\n\n\tcmd := stringToCommand(publishIntCmd.Command(context.Background()).String())\n\tparsed, err := ParsePublishInternalCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"my-pubsub\", parsed.Channel)\n\trequire.Equal(t, \"my-message\", parsed.Message)\n}\n\nfunc TestProtocol_ParseSubscribeCommand(t *testing.T) {\n\tsubscribeCmd := NewSubscribe(\"channel-1\", \"channel-2\", \"channel-3\")\n\n\tcmd := stringToCommand(subscribeCmd.Command(context.Background()).String())\n\tparsed, err := ParseSubscribeCommand(cmd)\n\trequire.NoError(t, err)\n\n\tchannels := []string{\"channel-1\", \"channel-2\", \"channel-3\"}\n\trequire.Equal(t, channels, parsed.Channels)\n}\n\nfunc TestProtocol_ParsePSubscribeCommand(t *testing.T) {\n\tpsubscribeCmd := NewPSubscribe(\"ch?nnel-*\")\n\n\tcmd := stringToCommand(psubscribeCmd.Command(context.Background()).String())\n\tparsed, err := ParsePSubscribeCommand(cmd)\n\trequire.NoError(t, err)\n\n\tpatterns := []string{\"ch?nnel-*\"}\n\trequire.Equal(t, patterns, parsed.Patterns)\n}\n\nfunc TestProtocol_PubSubChannels(t *testing.T) {\n\tpubsubChannelsCmd := NewPubSubChannels()\n\n\tcmd := stringToCommand(pubsubChannelsCmd.Command(context.Background()).String())\n\tparsed, err := ParsePubSubChannelsCommand(cmd)\n\trequire.NoError(t, err)\n\trequire.Empty(t, parsed.Pattern)\n}\n\nfunc 
TestProtocol_PubSubChannels_Patterns(t *testing.T) {\n\tpubsubChannelsCmd := NewPubSubChannels()\n\tpubsubChannelsCmd.SetPattern(\"ch?nnel-*\")\n\n\tcmd := stringToCommand(pubsubChannelsCmd.Command(context.Background()).String())\n\tparsed, err := ParsePubSubChannelsCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"ch?nnel-*\", parsed.Pattern)\n}\n\nfunc TestProtocol_PubSubNumpat(t *testing.T) {\n\tpubsubNumpatCmd := NewPubSubNumpat()\n\n\tcmd := stringToCommand(pubsubNumpatCmd.Command(context.Background()).String())\n\t_, err := ParsePubSubNumpatCommand(cmd)\n\trequire.NoError(t, err)\n}\n\nfunc TestProtocol_PubSubNumsub(t *testing.T) {\n\tpubsubNumsubCmd := NewPubSubNumsub(\"channel-1\", \"channel-2\", \"channel-3\")\n\n\tcmd := stringToCommand(pubsubNumsubCmd.Command(context.Background()).String())\n\tparsed, err := ParsePubSubNumsubCommand(cmd)\n\trequire.NoError(t, err)\n\n\tchannels := []string{\"channel-1\", \"channel-2\", \"channel-3\"}\n\trequire.Equal(t, channels, parsed.Channels)\n}\n"
  },
  {
    "path": "internal/protocol/system.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage protocol\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"github.com/olric-data/olric/internal/util\"\n\t\"github.com/redis/go-redis/v9\"\n\t\"github.com/tidwall/redcon\"\n)\n\ntype Ping struct {\n\tMessage string\n}\n\nfunc NewPing() *Ping {\n\treturn &Ping{}\n}\n\nfunc (p *Ping) SetMessage(m string) *Ping {\n\tp.Message = m\n\treturn p\n}\n\nfunc (p *Ping) Command(ctx context.Context) *redis.StringCmd {\n\tvar args []interface{}\n\targs = append(args, Generic.Ping)\n\tif p.Message != \"\" {\n\t\targs = append(args, p.Message)\n\t}\n\treturn redis.NewStringCmd(ctx, args...)\n}\n\nfunc ParsePingCommand(cmd redcon.Command) (*Ping, error) {\n\tif len(cmd.Args) < 1 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\tp := NewPing()\n\tif len(cmd.Args) == 2 {\n\t\tp.SetMessage(util.BytesToString(cmd.Args[1]))\n\t}\n\treturn p, nil\n}\n\ntype MoveFragment struct {\n\tPayload []byte\n}\n\nfunc NewMoveFragment(payload []byte) *MoveFragment {\n\treturn &MoveFragment{\n\t\tPayload: payload,\n\t}\n}\n\nfunc (m *MoveFragment) Command(ctx context.Context) *redis.StatusCmd {\n\tvar args []interface{}\n\targs = append(args, Internal.MoveFragment)\n\targs = append(args, m.Payload)\n\treturn redis.NewStatusCmd(ctx, args...)\n}\n\nfunc ParseMoveFragmentCommand(cmd redcon.Command) (*MoveFragment, error) {\n\tif len(cmd.Args) < 2 
{\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\treturn NewMoveFragment(cmd.Args[1]), nil\n}\n\ntype UpdateRouting struct {\n\tPayload       []byte\n\tCoordinatorID uint64\n}\n\nfunc NewUpdateRouting(payload []byte, coordinatorID uint64) *UpdateRouting {\n\treturn &UpdateRouting{\n\t\tPayload:       payload,\n\t\tCoordinatorID: coordinatorID,\n\t}\n}\n\nfunc (u *UpdateRouting) Command(ctx context.Context) *redis.StringCmd {\n\tvar args []interface{}\n\targs = append(args, Internal.UpdateRouting)\n\targs = append(args, u.Payload)\n\targs = append(args, u.CoordinatorID)\n\treturn redis.NewStringCmd(ctx, args...)\n}\n\nfunc ParseUpdateRoutingCommand(cmd redcon.Command) (*UpdateRouting, error) {\n\tif len(cmd.Args) < 2 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\tcoordinatorID, err := strconv.ParseUint(util.BytesToString(cmd.Args[2]), 10, 64)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewUpdateRouting(cmd.Args[1], coordinatorID), nil\n}\n\ntype LengthOfPart struct {\n\tPartID  uint64\n\tReplica bool\n}\n\nfunc NewLengthOfPart(partID uint64) *LengthOfPart {\n\treturn &LengthOfPart{\n\t\tPartID: partID,\n\t}\n}\n\nfunc (l *LengthOfPart) SetReplica() *LengthOfPart {\n\tl.Replica = true\n\treturn l\n}\n\nfunc (l *LengthOfPart) Command(ctx context.Context) *redis.IntCmd {\n\tvar args []interface{}\n\targs = append(args, Internal.LengthOfPart)\n\targs = append(args, l.PartID)\n\tif l.Replica {\n\t\targs = append(args, \"RC\")\n\t}\n\treturn redis.NewIntCmd(ctx, args...)\n}\n\nfunc ParseLengthOfPartCommand(cmd redcon.Command) (*LengthOfPart, error) {\n\tif len(cmd.Args) < 2 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\tpartID, err := strconv.ParseUint(util.BytesToString(cmd.Args[1]), 10, 64)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tl := NewLengthOfPart(partID)\n\tif len(cmd.Args) == 3 {\n\t\targ := util.BytesToString(cmd.Args[2])\n\t\tif arg == \"RC\" {\n\t\t\tl.SetReplica()\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"%w: %s\", 
ErrInvalidArgument, arg)\n\t\t}\n\t}\n\n\treturn l, nil\n}\n\ntype Stats struct {\n\tCollectRuntime bool\n}\n\nfunc NewStats() *Stats {\n\treturn &Stats{}\n}\n\nfunc (s *Stats) SetCollectRuntime() *Stats {\n\ts.CollectRuntime = true\n\treturn s\n}\n\nfunc (s *Stats) Command(ctx context.Context) *redis.StringCmd {\n\tvar args []interface{}\n\targs = append(args, Generic.Stats)\n\tif s.CollectRuntime {\n\t\targs = append(args, \"CR\")\n\t}\n\treturn redis.NewStringCmd(ctx, args...)\n}\n\nfunc ParseStatsCommand(cmd redcon.Command) (*Stats, error) {\n\tif len(cmd.Args) < 1 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\ts := NewStats()\n\tif len(cmd.Args) == 2 {\n\t\targ := util.BytesToString(cmd.Args[1])\n\t\tif arg == \"CR\" {\n\t\t\ts.SetCollectRuntime()\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"%w: %s\", ErrInvalidArgument, arg)\n\t\t}\n\t}\n\n\treturn s, nil\n}\n\n// Auth represents a structure for authentication containing a password.\ntype Auth struct {\n\tPassword string\n}\n\n// NewAuth creates and returns a new Auth instance initialized with the given password.\nfunc NewAuth(password string) *Auth {\n\treturn &Auth{\n\t\tPassword: password,\n\t}\n}\n\n// Command constructs a Redis AUTH command using the provided authentication password from the Auth instance.\nfunc (a *Auth) Command(ctx context.Context) *redis.StatusCmd {\n\tvar args []interface{}\n\n\targs = append(args, Generic.Auth)\n\targs = append(args, a.Password)\n\n\treturn redis.NewStatusCmd(ctx, args...)\n}\n\n// ParseAuthCommand parses a redcon.Command to create an Auth instance and validates command arguments.\nfunc ParseAuthCommand(cmd redcon.Command) (*Auth, error) {\n\tif len(cmd.Args) != 2 {\n\t\treturn nil, errWrongNumber(cmd.Args)\n\t}\n\n\treturn NewAuth(\n\t\tutil.BytesToString(cmd.Args[1]),\n\t), nil\n}\n"
  },
  {
    "path": "internal/protocol/system_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage protocol\n\nimport (\n\t\"context\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestProtocol_Ping(t *testing.T) {\n\tping := NewPing()\n\n\tcmd := stringToCommand(ping.Command(context.Background()).String())\n\tparsed, err := ParsePingCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"\", parsed.Message)\n}\n\nfunc TestProtocol_Ping_Message(t *testing.T) {\n\tping := NewPing()\n\tping.SetMessage(\"message\")\n\n\tcmd := stringToCommand(ping.Command(context.Background()).String())\n\tparsed, err := ParsePingCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"message\", parsed.Message)\n}\n\nfunc TestProtocol_MoveFragment(t *testing.T) {\n\tmoveFragmentCmd := NewMoveFragment([]byte(\"payload\"))\n\n\tcmd := stringToCommand(moveFragmentCmd.Command(context.Background()).String())\n\tparsed, err := ParseMoveFragmentCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, []byte(\"payload\"), parsed.Payload)\n}\n\nfunc TestProtocol_UpdateRoutingTable(t *testing.T) {\n\tupdateRoutingTableCmd := NewUpdateRouting([]byte(\"payload\"), 123)\n\n\tcmd := stringToCommand(updateRoutingTableCmd.Command(context.Background()).String())\n\tparsed, err := ParseUpdateRoutingCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, []byte(\"payload\"), parsed.Payload)\n\trequire.Equal(t, 
uint64(123), parsed.CoordinatorID)\n}\n\nfunc TestProtocol_LengthOfPart(t *testing.T) {\n\tupdateRoutingTableCmd := NewLengthOfPart(123)\n\n\tcmd := stringToCommand(updateRoutingTableCmd.Command(context.Background()).String())\n\tparsed, err := ParseLengthOfPartCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, uint64(123), parsed.PartID)\n\trequire.False(t, parsed.Replica)\n}\n\nfunc TestProtocol_LengthOfPart_RC(t *testing.T) {\n\tupdateRoutingTableCmd := NewLengthOfPart(123)\n\tupdateRoutingTableCmd.SetReplica()\n\n\tcmd := stringToCommand(updateRoutingTableCmd.Command(context.Background()).String())\n\tparsed, err := ParseLengthOfPartCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, uint64(123), parsed.PartID)\n\trequire.True(t, parsed.Replica)\n}\n\nfunc TestProtocol_Stats(t *testing.T) {\n\tstatsCmd := NewStats()\n\n\tcmd := stringToCommand(statsCmd.Command(context.Background()).String())\n\tparsed, err := ParseStatsCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.False(t, parsed.CollectRuntime)\n}\n\nfunc TestProtocol_Stats_CR(t *testing.T) {\n\tstatsCmd := NewStats()\n\tstatsCmd.SetCollectRuntime()\n\n\tcmd := stringToCommand(statsCmd.Command(context.Background()).String())\n\tparsed, err := ParseStatsCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.True(t, parsed.CollectRuntime)\n}\n\nfunc TestProtocol_Auth(t *testing.T) {\n\tauth := NewAuth(\"secret\")\n\n\tcmd := stringToCommand(auth.Command(context.Background()).String())\n\tparsed, err := ParseAuthCommand(cmd)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, \"secret\", parsed.Password)\n}\n\nfunc TestProtocol_Auth_errWrongNumber(t *testing.T) {\n\tcmd := stringToCommand(\"auth:\")\n\n\t_, err := ParseAuthCommand(cmd)\n\trequire.Error(t, err)\n\trequire.Equal(t, \"wrong number of arguments for 'auth' command\", err.Error())\n}\n"
  },
  {
    "path": "internal/pubsub/handlers.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage pubsub\n\nimport (\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/tidwall/redcon\"\n)\n\nfunc (s *Service) subscribeCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tsubscribeCmd, err := protocol.ParseSubscribeCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tfor _, channel := range subscribeCmd.Channels {\n\t\ts.pubsub.Subscribe(conn, channel)\n\t\tCurrentSubscribers.Increase(1)\n\t\tSubscribersTotal.Increase(1)\n\t}\n}\n\nfunc (s *Service) publishCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tpublishCmd, err := protocol.ParsePublishCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tvar total int\n\tmembers := s.rt.Discovery().GetMembers()\n\tfor _, member := range members {\n\t\tif member.CompareByID(s.rt.This()) {\n\t\t\tcount := s.pubsub.Publish(publishCmd.Channel, publishCmd.Message)\n\t\t\ttotal += count\n\t\t\tPublishedTotal.Increase(int64(count))\n\t\t\tcontinue\n\t\t}\n\n\t\tpi := protocol.NewPublishInternal(publishCmd.Channel, publishCmd.Message).Command(s.ctx)\n\t\trc := s.client.Get(member.String())\n\t\terr = rc.Process(s.ctx, pi)\n\t\tif err != nil {\n\t\t\tprotocol.WriteError(conn, err)\n\t\t\treturn\n\t\t}\n\t\tpcount, err := pi.Result()\n\t\tif err != nil {\n\t\t\tprotocol.WriteError(conn, 
err)\n\t\t\treturn\n\t\t}\n\t\ttotal += int(pcount)\n\t\tPublishedTotal.Increase(pcount)\n\t}\n\n\tconn.WriteInt(total)\n}\n\nfunc (s *Service) publishInternalCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tpublishInternalCmd, err := protocol.ParsePublishInternalCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\tcount := s.pubsub.Publish(publishInternalCmd.Channel, publishInternalCmd.Message)\n\tconn.WriteInt(count)\n}\n\nfunc (s *Service) psubscribeCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tpsubscribeCmd, err := protocol.ParsePSubscribeCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tfor _, pattern := range psubscribeCmd.Patterns {\n\t\ts.pubsub.Psubscribe(conn, pattern)\n\t\tPSubscribersTotal.Increase(1)\n\t\tCurrentPSubscribers.Increase(1)\n\t}\n}\n\nfunc (s *Service) pubsubChannelsCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tpubsubChannelsCmd, err := protocol.ParsePubSubChannelsCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tvar channels []string\n\tif pubsubChannelsCmd.Pattern != \"\" {\n\t\tchannels = s.pubsub.ChannelsWithPatterns(pubsubChannelsCmd.Pattern)\n\t} else {\n\t\tchannels = s.pubsub.Channels()\n\t}\n\tconn.WriteArray(len(channels))\n\tfor _, channel := range channels {\n\t\tconn.WriteBulkString(channel)\n\t}\n}\n\nfunc (s *Service) pubsubNumpatCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\t_, err := protocol.ParsePubSubNumpatCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tconn.WriteInt(s.pubsub.Numpat())\n}\n\nfunc (s *Service) pubsubNumsubCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tpubsubNumsubCmd, err := protocol.ParsePubSubNumsubCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tif len(pubsubNumsubCmd.Channels) == 0 
{\n\t\tconn.WriteArray(0)\n\t\treturn\n\t}\n\n\tconn.WriteArray(len(pubsubNumsubCmd.Channels) * 2)\n\tfor _, channel := range pubsubNumsubCmd.Channels {\n\t\tconn.WriteBulkString(channel)\n\t\tconn.WriteInt(s.pubsub.Numsub(channel))\n\t}\n}\n"
  },
  {
    "path": "internal/pubsub/handlers_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage pubsub\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/testcluster\"\n\t\"github.com/redis/go-redis/v9\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestPubSub_Handler_Subscribe(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\trc := s.client.Get(s.rt.This().String())\n\tctx := context.Background()\n\tps := rc.Subscribe(ctx, \"my-channel\")\n\n\t// Wait for confirmation that subscription is created before publishing anything.\n\tmsgi, err := ps.ReceiveTimeout(ctx, time.Second)\n\trequire.NoError(t, err)\n\n\tsubs := msgi.(*redis.Subscription)\n\trequire.Equal(t, \"subscribe\", subs.Kind)\n\trequire.Equal(t, \"my-channel\", subs.Channel)\n\trequire.Equal(t, 1, subs.Count)\n\n\t// Go channel which receives messages.\n\tch := ps.Channel()\n\n\texpected := make(map[string]struct{})\n\tfor i := 0; i < 10; i++ {\n\t\tmsg := fmt.Sprintf(\"my-message-%d\", i)\n\t\terr = rc.Publish(ctx, \"my-channel\", msg).Err()\n\t\trequire.NoError(t, err)\n\t\texpected[msg] = struct{}{}\n\t}\n\n\tconsumed := make(map[string]struct{})\nL:\n\tfor {\n\t\tselect {\n\t\tcase msg := <-ch:\n\t\t\trequire.Equal(t, \"my-channel\", msg.Channel)\n\t\t\tconsumed[msg.Payload] = struct{}{}\n\t\t\tif len(consumed) == 10 
{\n\t\t\t\t// It would be OK\n\t\t\t\tbreak L\n\t\t\t}\n\t\tcase <-time.After(5 * time.Second):\n\t\t\t// Enough. Break it and check the consumed items.\n\t\t\tbreak L\n\t\t}\n\t}\n\n\trequire.Equal(t, expected, consumed)\n}\n\nfunc TestPubSub_Handler_Unsubscribe(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\trc := s.client.Get(s.rt.This().String())\n\tctx := context.Background()\n\tps := rc.Subscribe(ctx, \"my-channel\")\n\n\t// Wait for confirmation that subscription is created before publishing anything.\n\t_, err := ps.ReceiveTimeout(ctx, time.Second)\n\trequire.NoError(t, err)\n\n\t// Go channel which receives messages.\n\tch := ps.Channel()\n\n\terr = ps.Unsubscribe(ctx, \"my-channel\")\n\trequire.NoError(t, err)\n\n\t// Wait for some time. Because the Redis client doesn't wait for the response after\n\t// writing 'unsubscribe' command.\n\t<-time.After(250 * time.Millisecond)\n\n\terr = rc.Publish(ctx, \"my-channel\", \"hello, world!\").Err()\n\trequire.NoError(t, err)\nL:\n\tfor {\n\t\tselect {\n\t\tcase <-ch:\n\t\t\trequire.Fail(t, \"Received a message from an unsubscribed channel\")\n\t\tcase <-time.After(250 * time.Millisecond):\n\t\t\t// Enough. 
Break it and check the consumed items.\n\t\t\tbreak L\n\t\t}\n\t}\n}\n\nfunc TestPubSub_Handler_PSubscribe(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\trc := s.client.Get(s.rt.This().String())\n\tctx := context.Background()\n\tps := rc.PSubscribe(ctx, \"h?llo\")\n\n\t// Wait for confirmation that subscription is created before publishing anything.\n\tmsgi, err := ps.ReceiveTimeout(ctx, time.Second)\n\trequire.NoError(t, err)\n\n\tsubs := msgi.(*redis.Subscription)\n\trequire.Equal(t, \"psubscribe\", subs.Kind)\n\trequire.Equal(t, \"h?llo\", subs.Channel)\n\trequire.Equal(t, 1, subs.Count)\n\n\t// Go channel which receives messages.\n\tch := ps.Channel()\n\n\texpected := make(map[string]struct{})\n\tfor _, channel := range []string{\"hello\", \"hallo\", \"hxllo\"} {\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tmsg := fmt.Sprintf(\"my-message-%s-%d\", channel, i)\n\t\t\terr = rc.Publish(ctx, channel, msg).Err()\n\t\t\trequire.NoError(t, err)\n\t\t\texpected[msg] = struct{}{}\n\t\t}\n\t}\n\n\tconsumed := make(map[string]struct{})\nL:\n\tfor {\n\t\tselect {\n\t\tcase msg := <-ch:\n\t\t\tconsumed[msg.Payload] = struct{}{}\n\t\t\tif len(consumed) == 30 {\n\t\t\t\t// It would be OK\n\t\t\t\tbreak L\n\t\t\t}\n\t\tcase <-time.After(5 * time.Second):\n\t\t\t// Enough. 
Break it and check the consumed items.\n\t\t\tbreak L\n\t\t}\n\t}\n\n\trequire.Equal(t, expected, consumed)\n}\n\nfunc TestPubSub_Handler_PUnsubscribe(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\trc := s.client.Get(s.rt.This().String())\n\tctx := context.Background()\n\tps := rc.PSubscribe(ctx, \"h?llo\")\n\n\t// Wait for confirmation that subscription is created before publishing anything.\n\t_, err := ps.ReceiveTimeout(ctx, time.Second)\n\trequire.NoError(t, err)\n\n\t// Go channel which receives messages.\n\tch := ps.Channel()\n\n\terr = ps.PUnsubscribe(ctx, \"h?llo\")\n\trequire.NoError(t, err)\n\n\t// Wait for some time. Because the Redis client doesn't wait for the response after\n\t// writing 'unsubscribe' command.\n\t<-time.After(250 * time.Millisecond)\n\n\tfor _, channel := range []string{\"hello\", \"hallo\", \"hxllo\"} {\n\t\terr = rc.Publish(ctx, channel, \"hello, world!\").Err()\n\t\trequire.NoError(t, err)\n\t}\n\nL:\n\tfor {\n\t\tselect {\n\t\tcase <-ch:\n\t\t\trequire.Fail(t, \"Received a message from an unsubscribed channel\")\n\t\tcase <-time.After(250 * time.Millisecond):\n\t\t\t// Enough. 
Break it and check the consumed items.\n\t\t\tbreak L\n\t\t}\n\t}\n}\n\nfunc TestPubSub_Handler_Ping(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\trc := s.client.Get(s.rt.This().String())\n\tctx := context.Background()\n\tps := rc.Subscribe(ctx, \"my-channel\")\n\n\t// Wait for confirmation that subscription is created before publishing anything.\n\t_, err := ps.ReceiveTimeout(ctx, time.Second)\n\trequire.NoError(t, err)\n\n\terr = ps.Ping(ctx, \"hello, world!\")\n\trequire.NoError(t, err)\n\n\tmsg, err := ps.ReceiveTimeout(ctx, time.Second)\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"Pong<hello, world!>\", msg.(*redis.Pong).String())\n}\n\nfunc TestPubSub_Handler_Close(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\trc := s.client.Get(s.rt.This().String())\n\tctx := context.Background()\n\tps := rc.Subscribe(ctx, \"my-channel\")\n\n\t// Wait for confirmation that subscription is created before publishing anything.\n\t_, err := ps.ReceiveTimeout(ctx, time.Second)\n\trequire.NoError(t, err)\n\n\terr = ps.Close()\n\trequire.NoError(t, err)\n\n\terr = ps.Ping(ctx)\n\trequire.Error(t, err, \"redis: client is closed\")\n\t//TODO: Control active subscriber count\n}\n\nfunc TestPubSub_Handler_PubSubChannels_Without_Patterns(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\trc := s.client.Get(s.rt.This().String())\n\tctx := context.Background()\n\tchannels := make(map[string]struct{})\n\tfor i := 0; i < 10; i++ {\n\t\tchannel := fmt.Sprintf(\"my-channel-%d\", i)\n\t\tps := rc.Subscribe(ctx, channel)\n\t\t// Wait for confirmation that subscription is created before publishing anything.\n\t\t_, err := ps.ReceiveTimeout(ctx, time.Second)\n\t\trequire.NoError(t, err)\n\t\tchannels[channel] = struct{}{}\n\t}\n\n\tres := 
rc.PubSubChannels(ctx, \"\")\n\tresult, err := res.Result()\n\trequire.NoError(t, err)\n\trequire.Len(t, result, len(channels))\n\n\tfor _, channel := range result {\n\t\trequire.Contains(t, channels, channel)\n\t}\n}\n\nfunc TestPubSub_Handler_PubSubChannels_With_Patterns(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\trc := s.client.Get(s.rt.This().String())\n\tctx := context.Background()\n\n\tchannels := make(map[string]struct{})\n\tfor _, channel := range []string{\"hello-1\", \"hello-2\", \"hello-3\", \"foobar\"} {\n\t\tps := rc.Subscribe(ctx, channel)\n\t\t// Wait for confirmation that subscription is created before publishing anything.\n\t\t_, err := ps.ReceiveTimeout(ctx, time.Second)\n\t\trequire.NoError(t, err)\n\t\tchannels[channel] = struct{}{}\n\t}\n\n\tres := rc.PubSubChannels(ctx, \"h*\")\n\tresult, err := res.Result()\n\trequire.NoError(t, err)\n\trequire.Len(t, result, len(channels)-1)\n\trequire.NotContains(t, result, \"foobar\")\n\n\tfor _, channel := range result {\n\t\trequire.Contains(t, channels, channel)\n\t}\n}\n\nfunc TestPubSub_Handler_PubSubNumpat(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\trc := s.client.Get(s.rt.This().String())\n\tctx := context.Background()\n\n\tfor _, channel := range []string{\"h*llo\", \"f*bar\"} {\n\t\tps := rc.PSubscribe(ctx, channel)\n\t\t// Wait for confirmation that subscription is created before publishing anything.\n\t\t_, err := ps.ReceiveTimeout(ctx, time.Second)\n\t\trequire.NoError(t, err)\n\t}\n\n\tres := rc.PubSubNumPat(ctx)\n\tnr, err := res.Result()\n\trequire.NoError(t, err)\n\trequire.Equal(t, int64(2), nr)\n}\n\nfunc TestPubSub_Handler_PubSubNumsub(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\trc := s.client.Get(s.rt.This().String())\n\tctx 
:= context.Background()\n\n\tfor _, channel := range []string{\"hello\", \"hello\", \"foobar\", \"barfoo\"} {\n\t\tps := rc.Subscribe(ctx, channel)\n\t\t// Wait for confirmation that subscription is created before publishing anything.\n\t\t_, err := ps.ReceiveTimeout(ctx, time.Second)\n\t\trequire.NoError(t, err)\n\t}\n\n\tres := rc.PubSubNumSub(ctx, \"hello\", \"foobar\", \"barfoo\")\n\tnr, err := res.Result()\n\trequire.NoError(t, err)\n\trequire.Equal(t, int64(2), nr[\"hello\"])\n\trequire.Equal(t, int64(1), nr[\"foobar\"])\n\trequire.Equal(t, int64(1), nr[\"barfoo\"])\n}\n\nfunc TestPubSub_Cluster(t *testing.T) {\n\tcluster := testcluster.New(NewService)\n\ts1 := cluster.AddMember(nil).(*Service)\n\ts2 := cluster.AddMember(nil).(*Service)\n\tdefer cluster.Shutdown()\n\n\trc1 := s1.client.Get(s1.rt.This().String())\n\tctx := context.Background()\n\tps := rc1.Subscribe(ctx, \"my-channel\")\n\n\t// Wait for confirmation that subscription is created before publishing anything.\n\tmsgi, err := ps.ReceiveTimeout(ctx, time.Second)\n\trequire.NoError(t, err)\n\n\tsubs := msgi.(*redis.Subscription)\n\trequire.Equal(t, \"subscribe\", subs.Kind)\n\trequire.Equal(t, \"my-channel\", subs.Channel)\n\trequire.Equal(t, 1, subs.Count)\n\n\t// Go channel which receives messages.\n\tch := ps.Channel()\n\n\trc2 := s2.client.Get(s2.rt.This().String())\n\texpected := make(map[string]struct{})\n\tfor i := 0; i < 10; i++ {\n\t\tmsg := fmt.Sprintf(\"my-message-%d\", i)\n\t\terr = rc2.Publish(ctx, \"my-channel\", msg).Err()\n\t\trequire.NoError(t, err)\n\t\texpected[msg] = struct{}{}\n\t}\n\n\tconsumed := make(map[string]struct{})\nL:\n\tfor {\n\t\tselect {\n\t\tcase msg := <-ch:\n\t\t\trequire.Equal(t, \"my-channel\", msg.Channel)\n\t\t\tconsumed[msg.Payload] = struct{}{}\n\t\t\tif len(consumed) == 10 {\n\t\t\t\t// It would be OK\n\t\t\t\tbreak L\n\t\t\t}\n\t\tcase <-time.After(5 * time.Second):\n\t\t\t// Enough. 
Break it and check the consumed items.\n\t\t\tbreak L\n\t\t}\n\t}\n\n\trequire.Equal(t, expected, consumed)\n}\n"
  },
  {
    "path": "internal/pubsub/pubsub.go",
    "content": "// The MIT License (MIT)\n//\n// Copyright (c) 2016 Josh Baker\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy of\n// this software and associated documentation files (the \"Software\"), to deal in\n// the Software without restriction, including without limitation the rights to\n// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n// the Software, and to permit persons to whom the Software is furnished to do so,\n// subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in all\n// copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\npackage pubsub\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com/tidwall/btree\"\n\t\"github.com/tidwall/match\"\n\t\"github.com/tidwall/redcon\"\n)\n\n// PubSub is a Redis compatible pub/sub server\ntype PubSub struct {\n\tmu     sync.RWMutex\n\tnextid uint64\n\tinitd  bool\n\tchans  *btree.BTree\n\tconns  map[redcon.Conn]*pubSubConn\n\n\t// callbacks\n\tunsubscribeCallback  func()\n\tpunsubscribeCallback func()\n}\n\n// Subscribe a connection to PubSub\nfunc (ps *PubSub) Subscribe(conn redcon.Conn, channel string) {\n\tps.subscribe(conn, false, channel)\n}\n\n// Psubscribe a connection to PubSub\nfunc (ps *PubSub) Psubscribe(conn redcon.Conn, channel string) {\n\tps.subscribe(conn, true, channel)\n}\n\n// Publish a message to subscribers\nfunc (ps *PubSub) Publish(channel, message string) int 
{\n\tps.mu.RLock()\n\tdefer ps.mu.RUnlock()\n\tif !ps.initd {\n\t\treturn 0\n\t}\n\tvar sent int\n\t// write messages to all clients that are subscribed on the channel\n\tpivot := &pubSubEntry{pattern: false, channel: channel}\n\tps.chans.Ascend(pivot, func(item interface{}) bool {\n\t\tentry := item.(*pubSubEntry)\n\t\tif entry.channel != pivot.channel || entry.pattern != pivot.pattern {\n\t\t\treturn false\n\t\t}\n\t\tentry.sconn.writeMessage(entry.pattern, \"\", channel, message)\n\t\tsent++\n\t\treturn true\n\t})\n\n\t// match on and write all psubscribe clients\n\tpivot = &pubSubEntry{pattern: true}\n\tps.chans.Ascend(pivot, func(item interface{}) bool {\n\t\tentry := item.(*pubSubEntry)\n\t\tif match.Match(channel, entry.channel) {\n\t\t\tentry.sconn.writeMessage(entry.pattern, entry.channel, channel,\n\t\t\t\tmessage)\n\t\t}\n\t\tsent++\n\t\treturn true\n\t})\n\n\treturn sent\n}\n\ntype pubSubConn struct {\n\tid      uint64\n\tmu      sync.Mutex\n\tconn    redcon.Conn\n\tdconn   redcon.DetachedConn\n\tentries map[*pubSubEntry]bool\n}\n\ntype pubSubEntry struct {\n\tpattern bool\n\tsconn   *pubSubConn\n\tchannel string\n}\n\nfunc (sconn *pubSubConn) writeMessage(pat bool, pchan, channel, msg string) {\n\tsconn.mu.Lock()\n\tdefer sconn.mu.Unlock()\n\tif pat {\n\t\tsconn.dconn.WriteArray(4)\n\t\tsconn.dconn.WriteBulkString(\"pmessage\")\n\t\tsconn.dconn.WriteBulkString(pchan)\n\t\tsconn.dconn.WriteBulkString(channel)\n\t\tsconn.dconn.WriteBulkString(msg)\n\t} else {\n\t\tsconn.dconn.WriteArray(3)\n\t\tsconn.dconn.WriteBulkString(\"message\")\n\t\tsconn.dconn.WriteBulkString(channel)\n\t\tsconn.dconn.WriteBulkString(msg)\n\t}\n\tsconn.dconn.Flush()\n}\n\n// bgrunner runs in the background and reads incoming commands from the\n// detached client.\nfunc (sconn *pubSubConn) bgrunner(ps *PubSub) {\n\tdefer func() {\n\t\t// client connection has ended, disconnect from the PubSub instances\n\t\t// and close the network connection.\n\t\tps.mu.Lock()\n\t\tdefer 
ps.mu.Unlock()\n\t\tfor entry := range sconn.entries {\n\t\t\tps.chans.Delete(entry)\n\t\t}\n\t\tdelete(ps.conns, sconn.conn)\n\t\tsconn.mu.Lock()\n\t\tdefer sconn.mu.Unlock()\n\t\tsconn.dconn.Close()\n\t}()\n\tfor {\n\t\tcmd, err := sconn.dconn.ReadCommand()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tif len(cmd.Args) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tswitch strings.ToLower(string(cmd.Args[0])) {\n\t\tcase \"psubscribe\", \"subscribe\":\n\t\t\tif len(cmd.Args) < 2 {\n\t\t\t\tfunc() {\n\t\t\t\t\tsconn.mu.Lock()\n\t\t\t\t\tdefer sconn.mu.Unlock()\n\t\t\t\t\tsconn.dconn.WriteError(fmt.Sprintf(\"ERR wrong number of \"+\n\t\t\t\t\t\t\"arguments for '%s'\", cmd.Args[0]))\n\t\t\t\t\tsconn.dconn.Flush()\n\t\t\t\t}()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcommand := strings.ToLower(string(cmd.Args[0]))\n\t\t\tfor i := 1; i < len(cmd.Args); i++ {\n\t\t\t\tif command == \"psubscribe\" {\n\t\t\t\t\tps.Psubscribe(sconn.conn, string(cmd.Args[i]))\n\t\t\t\t} else {\n\t\t\t\t\tps.Subscribe(sconn.conn, string(cmd.Args[i]))\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"unsubscribe\", \"punsubscribe\":\n\t\t\tpattern := strings.ToLower(string(cmd.Args[0])) == \"punsubscribe\"\n\t\t\tif len(cmd.Args) == 1 {\n\t\t\t\tps.unsubscribe(sconn.conn, pattern, true, \"\")\n\t\t\t} else {\n\t\t\t\tfor i := 1; i < len(cmd.Args); i++ {\n\t\t\t\t\tchannel := string(cmd.Args[i])\n\t\t\t\t\tps.unsubscribe(sconn.conn, pattern, false, channel)\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"quit\":\n\t\t\tfunc() {\n\t\t\t\tsconn.mu.Lock()\n\t\t\t\tdefer sconn.mu.Unlock()\n\t\t\t\tsconn.dconn.WriteString(\"OK\")\n\t\t\t\tsconn.dconn.Flush()\n\t\t\t\tsconn.dconn.Close()\n\t\t\t}()\n\t\t\treturn\n\t\tcase \"ping\":\n\t\t\tvar msg string\n\t\t\tswitch len(cmd.Args) {\n\t\t\tcase 1:\n\t\t\tcase 2:\n\t\t\t\tmsg = string(cmd.Args[1])\n\t\t\tdefault:\n\t\t\t\tfunc() {\n\t\t\t\t\tsconn.mu.Lock()\n\t\t\t\t\tdefer sconn.mu.Unlock()\n\t\t\t\t\tsconn.dconn.WriteError(fmt.Sprintf(\"ERR wrong number of \"+\n\t\t\t\t\t\t\"arguments for '%s'\", 
cmd.Args[0]))\n\t\t\t\t\tsconn.dconn.Flush()\n\t\t\t\t}()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfunc() {\n\t\t\t\tsconn.mu.Lock()\n\t\t\t\tdefer sconn.mu.Unlock()\n\t\t\t\tsconn.dconn.WriteArray(2)\n\t\t\t\tsconn.dconn.WriteBulkString(\"pong\")\n\t\t\t\tsconn.dconn.WriteBulkString(msg)\n\t\t\t\tsconn.dconn.Flush()\n\t\t\t}()\n\t\tdefault:\n\t\t\tfunc() {\n\t\t\t\tsconn.mu.Lock()\n\t\t\t\tdefer sconn.mu.Unlock()\n\t\t\t\tsconn.dconn.WriteError(fmt.Sprintf(\"ERR Can't execute '%s': \"+\n\t\t\t\t\t\"only (P)SUBSCRIBE / (P)UNSUBSCRIBE / PING / QUIT are \"+\n\t\t\t\t\t\"allowed in this context\", cmd.Args[0]))\n\t\t\t\tsconn.dconn.Flush()\n\t\t\t}()\n\t\t}\n\t}\n}\n\n// byEntry is a \"less\" function that sorts the entries in a btree. The tree\n// is sorted be (pattern, channel, conn.id). All pattern=true entries are at\n// the end (right) of the tree.\nfunc byEntry(a, b interface{}) bool {\n\taa := a.(*pubSubEntry)\n\tbb := b.(*pubSubEntry)\n\tif !aa.pattern && bb.pattern {\n\t\treturn true\n\t}\n\tif aa.pattern && !bb.pattern {\n\t\treturn false\n\t}\n\tif aa.channel < bb.channel {\n\t\treturn true\n\t}\n\tif aa.channel > bb.channel {\n\t\treturn false\n\t}\n\tvar aid uint64\n\tvar bid uint64\n\tif aa.sconn != nil {\n\t\taid = aa.sconn.id\n\t}\n\tif bb.sconn != nil {\n\t\tbid = bb.sconn.id\n\t}\n\treturn aid < bid\n}\n\nfunc (ps *PubSub) subscribe(conn redcon.Conn, pattern bool, channel string) {\n\tps.mu.Lock()\n\tdefer ps.mu.Unlock()\n\n\t// initialize the PubSub instance\n\tif !ps.initd {\n\t\tps.conns = make(map[redcon.Conn]*pubSubConn)\n\t\tps.chans = btree.New(byEntry)\n\t\tps.initd = true\n\t}\n\n\t// fetch the pubSubConn\n\tsconn, ok := ps.conns[conn]\n\tif !ok {\n\t\t// initialize a new pubSubConn, which runs on a detached connection,\n\t\t// and attach it to the PubSub channels/conn btree\n\t\tps.nextid++\n\t\tdconn := conn.Detach()\n\t\tsconn = &pubSubConn{\n\t\t\tid:      ps.nextid,\n\t\t\tconn:    conn,\n\t\t\tdconn:   dconn,\n\t\t\tentries: 
make(map[*pubSubEntry]bool),\n\t\t}\n\t\tps.conns[conn] = sconn\n\t}\n\tsconn.mu.Lock()\n\tdefer sconn.mu.Unlock()\n\n\t// add an entry to the pubsub btree\n\tentry := &pubSubEntry{\n\t\tpattern: pattern,\n\t\tchannel: channel,\n\t\tsconn:   sconn,\n\t}\n\tps.chans.Set(entry)\n\tsconn.entries[entry] = true\n\n\t// send a message to the client\n\tsconn.dconn.WriteArray(3)\n\tif pattern {\n\t\tsconn.dconn.WriteBulkString(\"psubscribe\")\n\t} else {\n\t\tsconn.dconn.WriteBulkString(\"subscribe\")\n\t}\n\tsconn.dconn.WriteBulkString(channel)\n\tvar count int\n\tfor ient := range sconn.entries {\n\t\tif ient.pattern == pattern {\n\t\t\tcount++\n\t\t}\n\t}\n\tsconn.dconn.WriteInt(count)\n\tsconn.dconn.Flush()\n\n\t// start the background client operation\n\tif !ok {\n\t\tgo sconn.bgrunner(ps)\n\t}\n}\n\nfunc (ps *PubSub) unsubscribe(conn redcon.Conn, pattern, all bool, channel string) {\n\tps.mu.Lock()\n\tdefer ps.mu.Unlock()\n\t// fetch the pubSubConn. This must exist\n\tsconn := ps.conns[conn]\n\tsconn.mu.Lock()\n\tdefer sconn.mu.Unlock()\n\n\tremoveEntry := func(entry *pubSubEntry) {\n\t\tif entry != nil {\n\t\t\tps.chans.Delete(entry)\n\t\t\tdelete(sconn.entries, entry)\n\t\t}\n\t\tsconn.dconn.WriteArray(3)\n\t\tif pattern {\n\t\t\tif ps.punsubscribeCallback != nil {\n\t\t\t\tps.punsubscribeCallback()\n\t\t\t}\n\t\t\tsconn.dconn.WriteBulkString(\"punsubscribe\")\n\t\t} else {\n\t\t\tif ps.unsubscribeCallback != nil {\n\t\t\t\tps.unsubscribeCallback()\n\t\t\t}\n\t\t\tsconn.dconn.WriteBulkString(\"unsubscribe\")\n\t\t}\n\t\tif entry != nil {\n\t\t\tsconn.dconn.WriteBulkString(entry.channel)\n\t\t} else {\n\t\t\tsconn.dconn.WriteNull()\n\t\t}\n\t\tvar count int\n\t\tfor ient := range sconn.entries {\n\t\t\tif ient.pattern == pattern {\n\t\t\t\tcount++\n\t\t\t}\n\t\t}\n\t\tsconn.dconn.WriteInt(count)\n\t}\n\tif all {\n\t\t// unsubscribe from all (p)subscribe entries\n\t\tvar entries []*pubSubEntry\n\t\tfor ient := range sconn.entries {\n\t\t\tif ient.pattern == pattern 
{\n\t\t\t\tentries = append(entries, ient)\n\t\t\t}\n\t\t}\n\t\tif len(entries) == 0 {\n\t\t\tremoveEntry(nil)\n\t\t} else {\n\t\t\tfor _, entry := range entries {\n\t\t\t\tremoveEntry(entry)\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// unsubscribe single channel from (p)subscribe.\n\t\tvar entry *pubSubEntry\n\t\tfor ient := range sconn.entries {\n\t\t\tif ient.pattern == pattern && ient.channel == channel {\n\t\t\t\tentry = ient\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tremoveEntry(entry)\n\t}\n\tsconn.dconn.Flush()\n}\n\nfunc (ps *PubSub) Channels() []string {\n\tps.mu.RLock()\n\tdefer ps.mu.RUnlock()\n\n\tif !ps.initd {\n\t\treturn nil\n\t}\n\n\tvar channels []string\n\tfor _, sconn := range ps.conns {\n\t\tsconn.mu.Lock()\n\t\tfor ient := range sconn.entries {\n\t\t\tif !ient.pattern {\n\t\t\t\tchannels = append(channels, ient.channel)\n\t\t\t}\n\t\t}\n\t\tsconn.mu.Unlock()\n\t}\n\n\treturn channels\n}\n\nfunc (ps *PubSub) ChannelsWithPatterns(pattern string) []string {\n\tps.mu.RLock()\n\tdefer ps.mu.RUnlock()\n\n\tif !ps.initd {\n\t\treturn nil\n\t}\n\n\tvar channels []string\n\tfor _, sconn := range ps.conns {\n\t\tsconn.mu.Lock()\n\t\tfor ient := range sconn.entries {\n\t\t\tif match.Match(ient.channel, pattern) {\n\t\t\t\tchannels = append(channels, ient.channel)\n\t\t\t}\n\t\t}\n\t\tsconn.mu.Unlock()\n\t}\n\n\treturn channels\n}\n\nfunc (ps *PubSub) Numpat() int {\n\tps.mu.RLock()\n\tdefer ps.mu.RUnlock()\n\n\tif !ps.initd {\n\t\treturn 0\n\t}\n\n\tset := make(map[string]struct{})\n\tfor _, sconn := range ps.conns {\n\t\tsconn.mu.Lock()\n\t\tfor ient := range sconn.entries {\n\t\t\tif ient.pattern {\n\t\t\t\tset[ient.channel] = struct{}{}\n\t\t\t}\n\t\t}\n\t\tsconn.mu.Unlock()\n\t}\n\n\treturn len(set)\n}\n\nfunc (ps *PubSub) Numsub(channel string) int {\n\tps.mu.RLock()\n\tdefer ps.mu.RUnlock()\n\n\tif !ps.initd {\n\t\treturn 0\n\t}\n\n\tvar result int\n\tfor _, sconn := range ps.conns {\n\t\tsconn.mu.Lock()\n\t\tfor ient := range sconn.entries {\n\t\t\tif 
ient.channel == channel {\n\t\t\t\tresult++\n\t\t\t}\n\t\t}\n\t\tsconn.mu.Unlock()\n\t}\n\n\treturn result\n}\n"
  },
  {
    "path": "internal/pubsub/pubsub_test.go",
    "content": "// The MIT License (MIT)\n//\n// Copyright (c) 2016 Josh Baker\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy of\n// this software and associated documentation files (the \"Software\"), to deal in\n// the Software without restriction, including without limitation the rights to\n// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n// the Software, and to permit persons to whom the Software is furnished to do so,\n// subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in all\n// copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\npackage pubsub\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"net\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/tidwall/redcon\"\n)\n\nfunc testPubSubServer(addr string, done chan bool) {\n\tvar ps PubSub\n\tgo func() {\n\t\ttch := time.NewTicker(time.Millisecond * 5)\n\t\tdefer tch.Stop()\n\t\tchannels := []string{\"achan1\", \"bchan2\", \"cchan3\", \"dchan4\"}\n\t\tfor i := 0; ; i++ {\n\t\t\tselect {\n\t\t\tcase <-tch.C:\n\t\t\tcase <-done:\n\t\t\t\tfor {\n\t\t\t\t\tvar empty bool\n\t\t\t\t\tps.mu.Lock()\n\t\t\t\t\tif len(ps.conns) == 0 {\n\t\t\t\t\t\tif ps.chans.Len() != 0 {\n\t\t\t\t\t\t\tpanic(\"chans not empty\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\tempty = true\n\t\t\t\t\t}\n\t\t\t\t\tps.mu.Unlock()\n\t\t\t\t\tif empty 
{\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\ttime.Sleep(time.Millisecond * 10)\n\t\t\t\t}\n\t\t\t\tdone <- true\n\t\t\t\treturn\n\t\t\t}\n\t\t\tchannel := channels[i%len(channels)]\n\t\t\tmessage := fmt.Sprintf(\"message %d\", i)\n\t\t\tps.Publish(channel, message)\n\t\t}\n\t}()\n\tpanic(redcon.ListenAndServe(addr, func(conn redcon.Conn, cmd redcon.Command) {\n\t\tswitch strings.ToLower(string(cmd.Args[0])) {\n\t\tdefault:\n\t\t\tconn.WriteError(\"ERR unknown command '\" +\n\t\t\t\tstring(cmd.Args[0]) + \"'\")\n\t\tcase \"publish\":\n\t\t\tif len(cmd.Args) != 3 {\n\t\t\t\tconn.WriteError(\"ERR wrong number of arguments for '\" +\n\t\t\t\t\tstring(cmd.Args[0]) + \"' command\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcount := ps.Publish(string(cmd.Args[1]), string(cmd.Args[2]))\n\t\t\tconn.WriteInt(count)\n\t\tcase \"subscribe\", \"psubscribe\":\n\t\t\tif len(cmd.Args) < 2 {\n\t\t\t\tconn.WriteError(\"ERR wrong number of arguments for '\" +\n\t\t\t\t\tstring(cmd.Args[0]) + \"' command\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcommand := strings.ToLower(string(cmd.Args[0]))\n\t\t\tfor i := 1; i < len(cmd.Args); i++ {\n\t\t\t\tif command == \"psubscribe\" {\n\t\t\t\t\tps.Psubscribe(conn, string(cmd.Args[i]))\n\t\t\t\t} else {\n\t\t\t\t\tps.Subscribe(conn, string(cmd.Args[i]))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}, nil, nil))\n}\n\nfunc TestPubSub(t *testing.T) {\n\taddr := \":12346\"\n\tdone := make(chan bool)\n\tgo testPubSubServer(addr, done)\n\n\tfinal := make(chan bool)\n\tgo func() {\n\t\tselect {\n\t\tcase <-time.Tick(time.Second * 30):\n\t\t\tpanic(\"timeout\")\n\t\tcase <-final:\n\t\t\treturn\n\t\t}\n\t}()\n\n\t// create 10 connections\n\tvar wg sync.WaitGroup\n\twg.Add(10)\n\tfor i := 0; i < 10; i++ {\n\t\tgo func(i int) {\n\t\t\tdefer wg.Done()\n\t\t\tvar conn net.Conn\n\t\t\tfor i := 0; i < 5; i++ {\n\t\t\t\tvar err error\n\t\t\t\tconn, err = net.Dial(\"tcp\", addr)\n\t\t\t\tif err != nil {\n\t\t\t\t\ttime.Sleep(time.Second / 10)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tif 
conn == nil {\n\t\t\t\trequire.Fail(t, \"could not connect to server\")\n\t\t\t}\n\t\t\tdefer conn.Close()\n\n\t\t\tregs := make(map[string]int)\n\t\t\tvar maxp int\n\t\t\tvar maxs int\n\t\t\t_, err := fmt.Fprintf(conn, \"subscribe achan1\\r\\n\")\n\t\t\trequire.NoError(t, err)\n\n\t\t\t_, err = fmt.Fprintf(conn, \"subscribe bchan2 cchan3\\r\\n\")\n\t\t\trequire.NoError(t, err)\n\n\t\t\t_, err = fmt.Fprintf(conn, \"psubscribe a*1\\r\\n\")\n\t\t\trequire.NoError(t, err)\n\n\t\t\t_, err = fmt.Fprintf(conn, \"psubscribe b*2 c*3\\r\\n\")\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// collect 50 messages from each channel\n\t\t\trd := bufio.NewReader(conn)\n\t\t\tvar buf []byte\n\t\t\tfor {\n\t\t\t\tline, err := rd.ReadBytes('\\n')\n\t\t\t\tif err != nil {\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t}\n\t\t\t\tbuf = append(buf, line...)\n\t\t\t\tn, resp := redcon.ReadNextRESP(buf)\n\t\t\t\tif n == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tbuf = nil\n\t\t\t\tif resp.Type != redcon.Array {\n\t\t\t\t\trequire.Fail(t, \"expected array\")\n\t\t\t\t}\n\t\t\t\tvar vals []redcon.RESP\n\t\t\t\tresp.ForEach(func(item redcon.RESP) bool {\n\t\t\t\t\tvals = append(vals, item)\n\t\t\t\t\treturn true\n\t\t\t\t})\n\n\t\t\t\tname := string(vals[0].Data)\n\t\t\t\tswitch name {\n\t\t\t\tcase \"subscribe\":\n\t\t\t\t\trequire.Len(t, vals, 3)\n\n\t\t\t\t\tch := string(vals[1].Data)\n\t\t\t\t\tregs[ch] = 0\n\t\t\t\t\tmaxs, _ = strconv.Atoi(string(vals[2].Data))\n\t\t\t\tcase \"psubscribe\":\n\t\t\t\t\trequire.Len(t, vals, 3)\n\n\t\t\t\t\tch := string(vals[1].Data)\n\t\t\t\t\tregs[ch] = 0\n\t\t\t\t\tmaxp, _ = strconv.Atoi(string(vals[2].Data))\n\t\t\t\tcase \"message\":\n\t\t\t\t\trequire.Len(t, vals, 3)\n\n\t\t\t\t\tch := string(vals[1].Data)\n\t\t\t\t\tregs[ch] = regs[ch] + 1\n\t\t\t\tcase \"pmessage\":\n\t\t\t\t\trequire.Len(t, vals, 4)\n\n\t\t\t\t\tch := string(vals[1].Data)\n\t\t\t\t\tregs[ch] = regs[ch] + 1\n\t\t\t\t}\n\t\t\t\tif len(regs) == 6 && maxp == 3 && maxs == 3 {\n\t\t\t\t\tready 
:= true\n\t\t\t\t\tfor _, count := range regs {\n\t\t\t\t\t\tif count < 50 {\n\t\t\t\t\t\t\tready = false\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif ready {\n\t\t\t\t\t\t// all messages have been received\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}(i)\n\t}\n\twg.Wait()\n\t// notify sender\n\tdone <- true\n\t// wait for sender\n\t<-done\n\t// stop the timeout\n\tfinal <- true\n}\n"
  },
  {
    "path": "internal/pubsub/service.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage pubsub\n\nimport (\n\t\"context\"\n\t\"sync\"\n\n\t\"github.com/olric-data/olric/internal/cluster/routingtable\"\n\t\"github.com/olric-data/olric/internal/environment\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/olric-data/olric/internal/server\"\n\t\"github.com/olric-data/olric/internal/service\"\n\t\"github.com/olric-data/olric/internal/stats\"\n\t\"github.com/olric-data/olric/pkg/flog\"\n)\n\nvar (\n\t// PublishedTotal is the total number of published messages during the life of this instance.\n\tPublishedTotal = stats.NewInt64Counter()\n\n\t// CurrentSubscribers is the current number of listeners of Pub/Sub.\n\tCurrentSubscribers = stats.NewInt64Gauge()\n\n\t// SubscribersTotal is the total number of registered listeners during the life of this instance.\n\tSubscribersTotal = stats.NewInt64Counter()\n\n\tCurrentPSubscribers = stats.NewInt64Gauge()\n\tPSubscribersTotal   = stats.NewInt64Counter()\n)\n\ntype Service struct {\n\tsync.RWMutex\n\n\tlog    *flog.Logger\n\tpubsub *PubSub\n\trt     *routingtable.RoutingTable\n\tserver *server.Server\n\tclient *server.Client\n\twg     sync.WaitGroup\n\tctx    context.Context\n\tcancel context.CancelFunc\n}\n\nfunc (s *Service) RegisterHandlers() {\n\ts.server.ServeMux().HandleFunc(protocol.PubSub.Subscribe, 
s.subscribeCommandHandler)\n\ts.server.ServeMux().HandleFunc(protocol.PubSub.PSubscribe, s.psubscribeCommandHandler)\n\ts.server.ServeMux().HandleFunc(protocol.PubSub.Publish, s.publishCommandHandler)\n\ts.server.ServeMux().HandleFunc(protocol.PubSub.PublishInternal, s.publishInternalCommandHandler)\n\ts.server.ServeMux().HandleFunc(protocol.PubSub.PubSubChannels, s.pubsubChannelsCommandHandler)\n\ts.server.ServeMux().HandleFunc(protocol.PubSub.PubSubNumpat, s.pubsubNumpatCommandHandler)\n\ts.server.ServeMux().HandleFunc(protocol.PubSub.PubSubNumsub, s.pubsubNumsubCommandHandler)\n\n}\n\nfunc NewService(e *environment.Environment) (service.Service, error) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tps := &PubSub{\n\t\tunsubscribeCallback: func() {\n\t\t\tCurrentSubscribers.Decrease(1)\n\t\t},\n\t\tpunsubscribeCallback: func() {\n\t\t\tCurrentPSubscribers.Decrease(1)\n\t\t},\n\t}\n\ts := &Service{\n\t\tlog:    e.Get(\"logger\").(*flog.Logger),\n\t\trt:     e.Get(\"routingtable\").(*routingtable.RoutingTable),\n\t\tserver: e.Get(\"server\").(*server.Server),\n\t\tclient: e.Get(\"client\").(*server.Client),\n\t\tpubsub: ps,\n\t\tctx:    ctx,\n\t\tcancel: cancel,\n\t}\n\ts.RegisterHandlers()\n\treturn s, nil\n}\n\nfunc (s *Service) Start() error {\n\t// dummy implementation\n\treturn nil\n}\n\nfunc (s *Service) Shutdown(ctx context.Context) error {\n\ts.cancel()\n\tdone := make(chan struct{})\n\n\tgo func() {\n\t\ts.wg.Wait()\n\t\tclose(done)\n\t}()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\terr := ctx.Err()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase <-done:\n\t}\n\treturn nil\n}\n\nvar _ service.Service = (*Service)(nil)\n"
  },
  {
    "path": "internal/ramblock/compaction.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage ramblock\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/ramblock/table\"\n\t\"github.com/olric-data/olric/pkg/storage\"\n)\n\nfunc (rb *RamBlock) evictTable(t *table.Table) error {\n\tvar total int\n\tvar evictErr error\n\tt.Range(func(hkey uint64, e storage.Entry) bool {\n\t\tentry, _ := t.GetRaw(hkey)\n\t\terr := rb.PutRaw(hkey, entry)\n\t\tif errors.Is(err, table.ErrNotEnoughSpace) {\n\t\t\terr := rb.makeTable()\n\t\t\tif err != nil {\n\t\t\t\tevictErr = err\n\t\t\t\treturn false\n\t\t\t}\n\t\t\t// try again\n\t\t\treturn false\n\t\t}\n\t\tif err != nil {\n\t\t\t// log this error and continue\n\t\t\tevictErr = fmt.Errorf(\"put command failed: HKey: %d: %w\", hkey, err)\n\t\t\treturn false\n\t\t}\n\n\t\terr = t.Delete(hkey)\n\t\tif errors.Is(err, table.ErrHKeyNotFound) {\n\t\t\terr = nil\n\t\t}\n\t\tif err != nil {\n\t\t\tevictErr = err\n\t\t\treturn false\n\t\t}\n\t\ttotal++\n\n\t\treturn total <= 1000\n\t})\n\n\tstats := t.Stats()\n\tif stats.Inuse == 0 {\n\t\tdelete(rb.tablesByCoefficient, t.Coefficient())\n\t\tt.Reset()\n\t}\n\n\treturn evictErr\n}\n\nfunc (rb *RamBlock) isTableExpired(recycledAt int64) bool {\n\ttimeout, err := rb.config.Get(\"maxIdleTableTimeout\")\n\tif err != nil {\n\t\t// That would be impossible\n\t\tpanic(err)\n\t}\n\tlimit := 
(timeout.(time.Duration).Nanoseconds() + recycledAt) / 1000000\n\treturn (time.Now().UnixNano() / 1000000) >= limit\n}\n\nfunc (rb *RamBlock) isCompactionOK(t *table.Table) bool {\n\ts := t.Stats()\n\treturn float64(s.Garbage) >= float64(s.Allocated)*maxGarbageRatio\n}\n\nfunc (rb *RamBlock) Compaction() (bool, error) {\n\tfor _, t := range rb.tables {\n\t\tif rb.isCompactionOK(t) {\n\t\t\terr := rb.evictTable(t)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\t// Continue scanning\n\t\t\treturn false, nil\n\t\t}\n\t}\n\n\tfor i := 0; i < len(rb.tables); i++ {\n\t\tt := rb.tables[i]\n\t\ts := t.Stats()\n\t\tif t.State() == table.RecycledState {\n\t\t\tif rb.isTableExpired(s.RecycledAt) {\n\t\t\t\tif len(rb.tables) == 1 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tdelete(rb.tablesByCoefficient, t.Coefficient())\n\t\t\t\trb.tables = append(rb.tables[:i], rb.tables[i+1:]...)\n\t\t\t\ti--\n\t\t\t}\n\t\t}\n\t}\n\n\treturn true, nil\n}\n"
  },
  {
    "path": "internal/ramblock/compaction_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage ramblock\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/cespare/xxhash/v2\"\n\t\"github.com/olric-data/olric/internal/ramblock/entry\"\n\t\"github.com/olric-data/olric/internal/ramblock/table\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestRamBlock_Compaction(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\ttimestamp := time.Now().UnixNano()\n\t// The current free space is 1 MB. 
Trigger a compaction operation.\n\tfor i := 0; i < 1500; i++ {\n\t\te := entry.New()\n\t\te.SetKey(bkey(i))\n\t\te.SetTTL(int64(i))\n\t\te.SetValue([]byte(fmt.Sprintf(\"%01000d\", i)))\n\t\te.SetTTL(timestamp)\n\t\thkey := xxhash.Sum64([]byte(e.Key()))\n\t\terr := s.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\n\tfor i := 0; i < 750; i++ {\n\t\thkey := xxhash.Sum64([]byte(bkey(i)))\n\t\terr := s.Delete(hkey)\n\t\trequire.NoError(t, err)\n\t}\n\n\tfor {\n\t\tdone, err := s.Compaction()\n\t\trequire.NoError(t, err)\n\t\tif done {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tvar compacted bool\n\tfor _, tb := range s.(*RamBlock).tables {\n\t\tstats := tb.Stats()\n\t\tif stats.Inuse == 0 {\n\t\t\trequire.Equal(t, table.RecycledState, tb.State())\n\t\t\tcompacted = true\n\t\t} else {\n\t\t\trequire.Equal(t, 750, stats.Length)\n\t\t\trequire.Equal(t, table.ReadWriteState, tb.State())\n\t\t}\n\t}\n\n\trequire.Truef(t, compacted, \"Compaction could not work properly\")\n}\n\nfunc TestRamBlock_Compaction_MaxIdleTableDuration(t *testing.T) {\n\tc := DefaultConfig()\n\tc.Add(\"maxIdleTableTimeout\", time.Millisecond)\n\n\ts := testRamBlock(t, c)\n\n\ttimestamp := time.Now().UnixNano()\n\t// The current free space is 1 MB. 
Trigger a compaction operation.\n\tfor i := 0; i < 1500; i++ {\n\t\te := entry.New()\n\t\te.SetKey(bkey(i))\n\t\te.SetTTL(int64(i))\n\t\te.SetValue([]byte(fmt.Sprintf(\"%01000d\", i)))\n\t\te.SetTTL(timestamp)\n\t\thkey := xxhash.Sum64([]byte(e.Key()))\n\t\terr := s.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\n\trequire.Equal(t, 2, len(s.(*RamBlock).tables))\n\n\tfor i := 0; i < 800; i++ {\n\t\thkey := xxhash.Sum64([]byte(bkey(i)))\n\t\terr := s.Delete(hkey)\n\t\trequire.NoError(t, err)\n\t}\n\n\t// It's still two because we have not triggered the compaction yet.\n\trequire.Equal(t, 2, len(s.(*RamBlock).tables))\n\n\tfor {\n\t\tdone, err := s.Compaction()\n\t\trequire.NoError(t, err)\n\t\tif done {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t<-time.After(100 * time.Millisecond)\n\n\t// Be sure deletion of the idle table.\n\tfor {\n\t\tdone, err := s.Compaction()\n\t\trequire.NoError(t, err)\n\t\tif done {\n\t\t\tbreak\n\t\t}\n\t}\n\n\trequire.Equal(t, 1, len(s.(*RamBlock).tables))\n}\n"
  },
  {
    "path": "internal/ramblock/entry/entry.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage entry\n\nimport (\n\t\"encoding/binary\"\n\n\t\"github.com/olric-data/olric/pkg/storage\"\n)\n\n// In-memory layout for an entry:\n//\n// KEY-LENGTH(uint8) | KEY(bytes) | TTL(uint64) | | Timestamp(uint64) | VALUE-LENGTH(uint32) | VALUE(bytes)\n\n// Entry represents a value with its metadata.\ntype Entry struct {\n\tkey        string\n\tttl        int64\n\ttimestamp  int64\n\tlastAccess int64\n\tvalue      []byte\n}\n\nvar _ storage.Entry = (*Entry)(nil)\n\nfunc New() *Entry {\n\treturn &Entry{}\n}\n\nfunc (e *Entry) SetKey(key string) {\n\te.key = key\n}\n\nfunc (e *Entry) Key() string {\n\treturn e.key\n}\n\nfunc (e *Entry) SetValue(value []byte) {\n\te.value = value\n}\n\nfunc (e *Entry) Value() []byte {\n\treturn e.value\n}\n\nfunc (e *Entry) SetTTL(ttl int64) {\n\te.ttl = ttl\n}\n\nfunc (e *Entry) TTL() int64 {\n\treturn e.ttl\n}\n\nfunc (e *Entry) SetTimestamp(timestamp int64) {\n\te.timestamp = timestamp\n}\n\nfunc (e *Entry) Timestamp() int64 {\n\treturn e.timestamp\n}\n\nfunc (e *Entry) SetLastAccess(lastAccess int64) {\n\te.lastAccess = lastAccess\n}\n\nfunc (e *Entry) LastAccess() int64 {\n\treturn e.lastAccess\n}\n\nfunc (e *Entry) Encode() []byte {\n\tvar offset int\n\n\tklen := uint8(len(e.Key()))\n\tvlen := len(e.Value())\n\tlength := 29 + len(e.Key()) + vlen\n\n\tbuf := make([]byte, length)\n\n\t// Set key length. 
It's 1 byte.\n\tcopy(buf[offset:], []byte{klen})\n\toffset++\n\n\t// Set the key.\n\tcopy(buf[offset:], e.Key())\n\toffset += len(e.Key())\n\n\t// Set the TTL. It's 8 bytes.\n\tbinary.BigEndian.PutUint64(buf[offset:], uint64(e.TTL()))\n\toffset += 8\n\n\t// Set the Timestamp. It's 8 bytes.\n\tbinary.BigEndian.PutUint64(buf[offset:], uint64(e.Timestamp()))\n\toffset += 8\n\n\t// Set the LastAccess. It's 8 bytes.\n\tbinary.BigEndian.PutUint64(buf[offset:], uint64(e.LastAccess()))\n\toffset += 8\n\n\t// Set the value length. It's 4 bytes.\n\tbinary.BigEndian.PutUint32(buf[offset:], uint32(len(e.Value())))\n\toffset += 4\n\n\t// Set the value.\n\tcopy(buf[offset:], e.Value())\n\treturn buf\n}\n\nfunc (e *Entry) Decode(buf []byte) {\n\tvar offset int\n\n\tkeyLength := int(buf[offset])\n\toffset++\n\n\te.key = string(buf[offset : offset+keyLength])\n\toffset += keyLength\n\n\te.ttl = int64(binary.BigEndian.Uint64(buf[offset : offset+8]))\n\toffset += 8\n\n\te.timestamp = int64(binary.BigEndian.Uint64(buf[offset : offset+8]))\n\toffset += 8\n\n\te.lastAccess = int64(binary.BigEndian.Uint64(buf[offset : offset+8]))\n\toffset += 8\n\n\tvlen := binary.BigEndian.Uint32(buf[offset : offset+4])\n\toffset += 4\n\te.value = buf[offset : offset+int(vlen)]\n}\n"
  },
  {
    "path": "internal/ramblock/entry/entry_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage entry\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestEntryEncodeDecode(t *testing.T) {\n\te := New()\n\te.SetKey(\"mykey\")\n\te.SetTTL(200)\n\te.SetTimestamp(time.Now().UnixNano())\n\te.SetLastAccess(time.Now().UnixNano())\n\te.SetValue([]byte(\"mydata\"))\n\n\tt.Run(\"Encode\", func(t *testing.T) {\n\t\tbuf := e.Encode()\n\t\trequire.NotNilf(t, buf, \"Expected some data. 
Got nil\")\n\n\t\tt.Run(\"Decode\", func(t *testing.T) {\n\t\t\titem := New()\n\t\t\titem.Decode(buf)\n\t\t\trequire.Equalf(t, e, item, \"Decoded Entry is different\")\n\t\t})\n\t})\n}\n\nfunc TestEntry_Encode_EmptyKey(t *testing.T) {\n\te := New()\n\te.SetKey(\"\")\n\te.SetTTL(100)\n\te.SetTimestamp(time.Now().UnixNano())\n\te.SetLastAccess(time.Now().UnixNano())\n\te.SetValue([]byte(\"somevalue\"))\n\n\tbuf := e.Encode()\n\trequire.NotNil(t, buf)\n\n\tdecoded := New()\n\tdecoded.Decode(buf)\n\trequire.Equal(t, \"\", decoded.Key())\n\trequire.Equal(t, []byte(\"somevalue\"), decoded.Value())\n\trequire.Equal(t, e.TTL(), decoded.TTL())\n\trequire.Equal(t, e.Timestamp(), decoded.Timestamp())\n\trequire.Equal(t, e.LastAccess(), decoded.LastAccess())\n}\n\nfunc TestEntry_Encode_EmptyValue(t *testing.T) {\n\te := New()\n\te.SetKey(\"mykey\")\n\te.SetTTL(50)\n\te.SetTimestamp(time.Now().UnixNano())\n\te.SetLastAccess(time.Now().UnixNano())\n\t// value is nil (zero value)\n\n\tbuf := e.Encode()\n\trequire.NotNil(t, buf)\n\n\tdecoded := New()\n\tdecoded.Decode(buf)\n\trequire.Equal(t, \"mykey\", decoded.Key())\n\trequire.Empty(t, decoded.Value())\n\trequire.Equal(t, e.TTL(), decoded.TTL())\n\trequire.Equal(t, e.Timestamp(), decoded.Timestamp())\n\trequire.Equal(t, e.LastAccess(), decoded.LastAccess())\n}\n\nfunc TestEntry_Encode_MaxLengthKey(t *testing.T) {\n\t// 255 bytes is the maximum key length that fits in a uint8.\n\tmaxKey := strings.Repeat(\"k\", 255)\n\n\te := New()\n\te.SetKey(maxKey)\n\te.SetTTL(999)\n\te.SetTimestamp(time.Now().UnixNano())\n\te.SetLastAccess(time.Now().UnixNano())\n\te.SetValue([]byte(\"val\"))\n\n\tbuf := e.Encode()\n\trequire.NotNil(t, buf)\n\n\tdecoded := New()\n\tdecoded.Decode(buf)\n\trequire.Equal(t, maxKey, decoded.Key())\n\trequire.Equal(t, []byte(\"val\"), decoded.Value())\n\trequire.Equal(t, e.TTL(), decoded.TTL())\n}\n\nfunc TestEntry_Encode_KeyLengthOverflow(t *testing.T) {\n\t// 256-byte key overflows uint8. 
The cast `uint8(256)` becomes 0,\n\t// so Encode writes the key length as 0 but copies the full 256 bytes\n\t// into the buffer. Decode then reads corrupted metadata offsets,\n\t// which causes a panic due to slice bounds out of range.\n\toverflowKey := strings.Repeat(\"x\", 256)\n\n\te := New()\n\te.SetKey(overflowKey)\n\te.SetTTL(1)\n\te.SetTimestamp(time.Now().UnixNano())\n\te.SetLastAccess(time.Now().UnixNano())\n\te.SetValue([]byte(\"v\"))\n\n\tbuf := e.Encode()\n\trequire.NotNil(t, buf)\n\n\t// Decode panics because the uint8-truncated key length (0) shifts all\n\t// field offsets, causing the decoded value length to be a garbage large\n\t// number that exceeds the buffer capacity.\n\trequire.Panics(t, func() {\n\t\tdecoded := New()\n\t\tdecoded.Decode(buf)\n\t}, \"Expected panic due to uint8 overflow of key length causing corrupted offsets\")\n}\n"
  },
  {
    "path": "internal/ramblock/ramblock.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n/*\nPackage ramblock implements a GC-friendly in-memory storage engine by using\nbuilt-in maps and byte slices. It also supports compaction.\n*/\npackage ramblock\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"reflect\"\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/ramblock/entry\"\n\t\"github.com/olric-data/olric/internal/ramblock/table\"\n\t\"github.com/olric-data/olric/pkg/storage\"\n)\n\nconst (\n\tmaxGarbageRatio = 0.40\n\t// 1MB\n\tdefaultTableSize = uint64(1 << 20)\n\n\tdefaultMaxIdleTableTimeout = 15 * time.Minute\n)\n\n// RamBlock implements an in-memory storage engine.\ntype RamBlock struct {\n\tcoefficient         uint64\n\ttableSize           uint64\n\ttablesByCoefficient map[uint64]*table.Table\n\ttables              []*table.Table\n\tconfig              *storage.Config\n}\n\nfunc DefaultConfig() *storage.Config {\n\toptions := storage.NewConfig(nil)\n\toptions.Add(\"tableSize\", defaultTableSize)\n\toptions.Add(\"maxIdleTableTimeout\", defaultMaxIdleTableTimeout)\n\treturn options\n}\n\nfunc New(c *storage.Config) (*RamBlock, error) {\n\tif c == nil {\n\t\tc = DefaultConfig()\n\t}\n\n\traw, err := c.Get(\"tableSize\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsize, err := prepareTableSize(raw)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &RamBlock{\n\t\ttableSize:          
 size,\n\t\ttablesByCoefficient: make(map[uint64]*table.Table),\n\t\tconfig:              c,\n\t}, nil\n}\n\nfunc (rb *RamBlock) SetConfig(c *storage.Config) {\n\trb.config = c\n}\n\nfunc (rb *RamBlock) makeTable() error {\n\tif len(rb.tables) != 0 {\n\t\thead := rb.tables[len(rb.tables)-1]\n\t\thead.SetState(table.ReadOnlyState)\n\n\t\tfor i, t := range rb.tables {\n\t\t\tif t.State() == table.RecycledState {\n\n\t\t\t\trb.tables = append(rb.tables[:i], rb.tables[i+1:]...)\n\n\t\t\t\trb.tables = append(rb.tables, t)\n\t\t\t\tt.SetCoefficient(rb.coefficient)\n\t\t\t\trb.tablesByCoefficient[rb.coefficient] = t\n\t\t\t\trb.coefficient++\n\n\t\t\t\tt.SetState(table.ReadWriteState)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\n\tnewTable := table.New(rb.tableSize)\n\trb.tables = append(rb.tables, newTable)\n\tnewTable.SetCoefficient(rb.coefficient)\n\trb.tablesByCoefficient[rb.coefficient] = newTable\n\trb.coefficient++\n\treturn nil\n}\n\nfunc (rb *RamBlock) SetLogger(_ *log.Logger) {}\n\nfunc (rb *RamBlock) Start() error {\n\tif rb.config == nil {\n\t\treturn errors.New(\"config cannot be nil\")\n\t}\n\treturn nil\n}\n\nfunc requiredSizeForAnEntry(e storage.Entry) uint64 {\n\treturn uint64(len(e.Key()) + len(e.Value()) + table.MetadataLength)\n}\n\nfunc prepareTableSize(raw interface{}) (size uint64, err error) {\n\tswitch raw.(type) {\n\tcase uint:\n\t\tsize = uint64(raw.(uint))\n\tcase uint8:\n\t\tsize = uint64(raw.(uint8))\n\tcase uint16:\n\t\tsize = uint64(raw.(uint16))\n\tcase uint32:\n\t\tsize = uint64(raw.(uint32))\n\tcase uint64:\n\t\tsize = raw.(uint64)\n\tcase int:\n\t\tv := raw.(int)\n\t\tif v < 0 {\n\t\t\terr = fmt.Errorf(\"tableSize cannot be negative: %d\", v)\n\t\t\treturn\n\t\t}\n\t\tsize = uint64(v)\n\tcase int8:\n\t\tv := raw.(int8)\n\t\tif v < 0 {\n\t\t\terr = fmt.Errorf(\"tableSize cannot be negative: %d\", v)\n\t\t\treturn\n\t\t}\n\t\tsize = uint64(v)\n\tcase int16:\n\t\tv := raw.(int16)\n\t\tif v < 0 {\n\t\t\terr = fmt.Errorf(\"tableSize cannot be 
negative: %d\", v)\n\t\t\treturn\n\t\t}\n\t\tsize = uint64(v)\n\tcase int32:\n\t\tv := raw.(int32)\n\t\tif v < 0 {\n\t\t\terr = fmt.Errorf(\"tableSize cannot be negative: %d\", v)\n\t\t\treturn\n\t\t}\n\t\tsize = uint64(v)\n\tcase int64:\n\t\tv := raw.(int64)\n\t\tif v < 0 {\n\t\t\terr = fmt.Errorf(\"tableSize cannot be negative: %d\", v)\n\t\t\treturn\n\t\t}\n\t\tsize = uint64(v)\n\tdefault:\n\t\terr = fmt.Errorf(\"invalid type for tableSize: %s\", reflect.TypeOf(raw))\n\t\treturn\n\t}\n\treturn\n}\n\n// Fork creates a new RamBlock instance.\nfunc (rb *RamBlock) Fork(c *storage.Config) (storage.Engine, error) {\n\tif c == nil {\n\t\tc = rb.config.Copy()\n\t}\n\n\tchild, err := New(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tt := table.New(rb.tableSize)\n\tchild.tables = append(child.tables, t)\n\tt.SetCoefficient(child.coefficient)\n\tchild.tablesByCoefficient[child.coefficient] = t\n\tchild.coefficient++\n\treturn child, nil\n}\n\nfunc (rb *RamBlock) Name() string {\n\treturn \"ramblock\"\n}\n\nfunc (rb *RamBlock) NewEntry() storage.Entry {\n\treturn entry.New()\n}\n\n// putWithRetry ensures at least one table exists and retries the given write\n// function on a new table when the current one runs out of space.\nfunc (rb *RamBlock) putWithRetry(writeFn func(t *table.Table) error) error {\n\tif len(rb.tables) == 0 {\n\t\tif err := rb.makeTable(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor {\n\t\t// Get the last value, storage only calls Put on the last created table.\n\t\tt := rb.tables[len(rb.tables)-1]\n\t\terr := writeFn(t)\n\t\tif errors.Is(err, table.ErrNotEnoughSpace) {\n\t\t\tif err := rb.makeTable(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// try again\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// everything is ok\n\t\treturn nil\n\t}\n}\n\n// PutRaw sets the raw value for the given key.\nfunc (rb *RamBlock) PutRaw(hkey uint64, value []byte) error {\n\tif uint64(len(value)) > rb.tableSize 
{\n\t\treturn storage.ErrEntryTooLarge\n\t}\n\n\treturn rb.putWithRetry(func(t *table.Table) error {\n\t\treturn t.PutRaw(hkey, value)\n\t})\n}\n\n// Put sets the value for the given key. It overwrites any previous value for that key\nfunc (rb *RamBlock) Put(hkey uint64, value storage.Entry) error {\n\tif requiredSizeForAnEntry(value) > rb.tableSize {\n\t\treturn storage.ErrEntryTooLarge\n\t}\n\n\treturn rb.putWithRetry(func(t *table.Table) error {\n\t\treturn t.Put(hkey, value)\n\t})\n}\n\n// GetRaw extracts encoded value for the given hkey. This is useful for merging tables.\nfunc (rb *RamBlock) GetRaw(hkey uint64) ([]byte, error) {\n\t// Scan available tables by starting the last added table.\n\tfor i := len(rb.tables) - 1; i >= 0; i-- {\n\t\tt := rb.tables[i]\n\t\traw, err := t.GetRaw(hkey)\n\t\tif errors.Is(err, table.ErrHKeyNotFound) {\n\t\t\t// Try out the other tables.\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// Found the key, return the stored value with its metadata.\n\t\treturn raw, nil\n\t}\n\n\t// Nothing here.\n\treturn nil, storage.ErrKeyNotFound\n}\n\n// Get gets the value for the given key. It returns storage.ErrKeyNotFound if the DB\n// does not contain the key. The returned Entry is its own copy,\n// it is safe to modify the contents of the returned slice.\nfunc (rb *RamBlock) Get(hkey uint64) (storage.Entry, error) {\n\t// Scan available tables by starting the last added table.\n\tfor i := len(rb.tables) - 1; i >= 0; i-- {\n\t\tt := rb.tables[i]\n\t\tres, err := t.Get(hkey)\n\t\tif errors.Is(err, table.ErrHKeyNotFound) {\n\t\t\t// Try out the other tables.\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// Found the key, return the stored value with its metadata.\n\t\treturn res, nil\n\t}\n\t// Nothing here.\n\treturn nil, storage.ErrKeyNotFound\n}\n\n// GetTTL gets the timeout for the given key. 
It returns storage.ErrKeyNotFound if the DB\n// does not contain the key.\nfunc (rb *RamBlock) GetTTL(hkey uint64) (int64, error) {\n\t// Scan available tables by starting the last added table.\n\tfor i := len(rb.tables) - 1; i >= 0; i-- {\n\t\tt := rb.tables[i]\n\t\tttl, err := t.GetTTL(hkey)\n\t\tif errors.Is(err, table.ErrHKeyNotFound) {\n\t\t\t// Try out the other tables.\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\t// Found the key, return its ttl\n\t\treturn ttl, nil\n\t}\n\n\t// Nothing here.\n\treturn 0, storage.ErrKeyNotFound\n}\n\nfunc (rb *RamBlock) GetLastAccess(hkey uint64) (int64, error) {\n\t// Scan available tables by starting the last added table.\n\tfor i := len(rb.tables) - 1; i >= 0; i-- {\n\t\tt := rb.tables[i]\n\t\tlastAccess, err := t.GetLastAccess(hkey)\n\t\tif errors.Is(err, table.ErrHKeyNotFound) {\n\t\t\t// Try out the other tables.\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\t// Found the key, return its ttl\n\t\treturn lastAccess, nil\n\t}\n\n\t// Nothing here.\n\treturn 0, storage.ErrKeyNotFound\n}\n\n// GetKey gets the key for the given hkey. It returns storage.ErrKeyNotFound if the DB\n// does not contain the key.\nfunc (rb *RamBlock) GetKey(hkey uint64) (string, error) {\n\t// Scan available tables by starting the last added table.\n\tfor i := len(rb.tables) - 1; i >= 0; i-- {\n\t\tt := rb.tables[i]\n\t\tkey, err := t.GetKey(hkey)\n\t\tif errors.Is(err, table.ErrHKeyNotFound) {\n\t\t\t// Try out the other tables.\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\t// Found the key, return its ttl\n\t\treturn key, nil\n\t}\n\n\t// Nothing here.\n\treturn \"\", storage.ErrKeyNotFound\n}\n\n// Delete deletes the value for the given key. 
Delete will not returns error if key doesn't exist.\nfunc (rb *RamBlock) Delete(hkey uint64) error {\n\t// Scan available tables by starting the last added table.\n\tfor i := len(rb.tables) - 1; i >= 0; i-- {\n\t\tt := rb.tables[i]\n\t\terr := t.Delete(hkey)\n\t\tif errors.Is(err, table.ErrHKeyNotFound) {\n\t\t\t// Try out the other tables.\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbreak\n\t}\n\n\treturn nil\n}\n\n// UpdateTTL updates the expiry for the given key.\nfunc (rb *RamBlock) UpdateTTL(hkey uint64, data storage.Entry) error {\n\t// Scan available tables by starting the last added table.\n\tfor i := len(rb.tables) - 1; i >= 0; i-- {\n\t\tt := rb.tables[i]\n\t\terr := t.UpdateTTL(hkey, data)\n\t\tif errors.Is(err, table.ErrHKeyNotFound) {\n\t\t\t// Try out the other tables.\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Found the key, return the stored value with its metadata.\n\t\treturn nil\n\t}\n\t// Nothing here.\n\treturn storage.ErrKeyNotFound\n}\n\n// Stats is a function which provides memory allocation and garbage ratio of a storage instance.\nfunc (rb *RamBlock) Stats() storage.Stats {\n\tstats := storage.Stats{\n\t\tNumTables: len(rb.tables),\n\t}\n\tfor _, t := range rb.tables {\n\t\ts := t.Stats()\n\t\tstats.Allocated += int(s.Allocated)\n\t\tstats.Inuse += int(s.Inuse)\n\t\tstats.Garbage += int(s.Garbage)\n\t\tstats.Length += s.Length\n\t}\n\treturn stats\n}\n\n// Check checks the key existence.\nfunc (rb *RamBlock) Check(hkey uint64) bool {\n\t// Scan available tables by starting the last added table.\n\tfor i := len(rb.tables) - 1; i >= 0; i-- {\n\t\tt := rb.tables[i]\n\t\tok := t.Check(hkey)\n\t\tif ok {\n\t\t\treturn true\n\t\t}\n\t}\n\n\t// Nothing there.\n\treturn false\n}\n\n// Range calls f sequentially for each key and value present in the map.\n// If f returns false, range stops the iteration. 
Range may be O(N) with\n// the number of elements in the map even if f returns false after a constant\n// number of calls.\nfunc (rb *RamBlock) Range(f func(hkey uint64, e storage.Entry) bool) {\n\t// Scan available tables by starting the last added table.\n\tfor i := len(rb.tables) - 1; i >= 0; i-- {\n\t\tt := rb.tables[i]\n\t\tt.Range(func(hkey uint64, e storage.Entry) bool {\n\t\t\treturn f(hkey, e)\n\t\t})\n\t}\n}\n\n// RangeHKey calls f sequentially for each key present in the map.\n// If f returns false, range stops the iteration. Range may be O(N) with\n// the number of elements in the map even if f returns false after a constant\n// number of calls.\nfunc (rb *RamBlock) RangeHKey(f func(hkey uint64) bool) {\n\t// Scan available tables by starting the last added table.\n\tfor i := len(rb.tables) - 1; i >= 0; i-- {\n\t\tt := rb.tables[i]\n\t\tt.RangeHKey(func(hkey uint64) bool {\n\t\t\treturn f(hkey)\n\t\t})\n\t}\n}\n\nfunc (rb *RamBlock) findCoefficient(coefficient uint64) (uint64, error) {\n\tvar sortedCoefficients []uint64\n\tfor newCf := range rb.tablesByCoefficient {\n\t\tsortedCoefficients = append(sortedCoefficients, newCf)\n\t}\n\tsort.Slice(sortedCoefficients, func(i, j int) bool { return sortedCoefficients[i] < sortedCoefficients[j] })\n\tfor _, cf := range sortedCoefficients {\n\t\tif cf > coefficient {\n\t\t\treturn cf, nil\n\t\t}\n\t}\n\treturn 0, io.EOF\n}\n\nfunc (rb *RamBlock) scanCommon(cursor uint64, expr string, count int, f func(e storage.Entry) bool) (uint64, error) {\n\tif len(rb.tables) == 0 {\n\t\treturn 0, nil\n\t}\n\n\tvar err error\n\tcf := cursor / rb.tableSize\n\tt, ok := rb.tablesByCoefficient[cf]\n\tif !ok {\n\t\tcf, err = rb.findCoefficient(cf)\n\t\tif err != nil {\n\t\t\t// Invalid cursor\n\t\t\treturn 0, nil\n\t\t}\n\t\tt = rb.tablesByCoefficient[cf]\n\t\tcursor = cf * rb.tableSize\n\t}\n\n\tvar tableCursor = cursor\n\tif cf > 0 {\n\t\ttableCursor = cursor - (rb.tableSize * cf)\n\t}\n\n\tif expr == \"\" {\n\t\ttableCursor, 
err = t.Scan(tableCursor, count, f)\n\t} else {\n\t\ttableCursor, err = t.ScanRegexMatch(tableCursor, expr, count, f)\n\t}\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tif tableCursor == 0 {\n\t\t_, ok := rb.tablesByCoefficient[cf+1]\n\t\tif !ok {\n\t\t\tcf, err = rb.findCoefficient(cf)\n\t\t\tif err != nil {\n\t\t\t\t// Invalid cursor\n\t\t\t\treturn 0, nil\n\t\t\t}\n\t\t\t// findCoefficient already returns the next valid coefficient\n\t\t\treturn rb.tableSize * cf, nil\n\t\t}\n\t\t// The next table\n\t\treturn rb.tableSize * (cf + 1), nil\n\t}\n\n\treturn tableCursor + (rb.tableSize * cf), nil\n}\n\nfunc (rb *RamBlock) Scan(cursor uint64, count int, f func(e storage.Entry) bool) (uint64, error) {\n\treturn rb.scanCommon(cursor, \"\", count, f)\n}\n\nfunc (rb *RamBlock) ScanRegexMatch(cursor uint64, expr string, count int, f func(e storage.Entry) bool) (uint64, error) {\n\treturn rb.scanCommon(cursor, expr, count, f)\n}\n\nfunc (rb *RamBlock) Close() error {\n\treturn nil\n}\n\nfunc (rb *RamBlock) Destroy() error {\n\treturn nil\n}\n\nvar _ storage.Engine = (*RamBlock)(nil)\n"
  },
  {
    "path": "internal/ramblock/ramblock_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage ramblock\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/cespare/xxhash/v2\"\n\t\"github.com/olric-data/olric/internal/ramblock/entry\"\n\t\"github.com/olric-data/olric/internal/ramblock/table\"\n\t\"github.com/olric-data/olric/pkg/storage\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc bkey(i int) string {\n\treturn fmt.Sprintf(\"%09d\", i)\n}\n\nfunc bval(i int) []byte {\n\treturn []byte(fmt.Sprintf(\"%025d\", i))\n}\n\nfunc testRamBlock(t *testing.T, c *storage.Config) storage.Engine {\n\tkv, err := New(c)\n\trequire.NoError(t, err)\n\n\tchild, err := kv.Fork(nil)\n\trequire.NoError(t, err)\n\n\terr = child.Start()\n\trequire.NoError(t, err)\n\n\treturn child\n}\n\nfunc TestRamBlock_Put(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\tfor i := 0; i < 100; i++ {\n\t\te := entry.New()\n\t\te.SetKey(bkey(i))\n\t\te.SetValue(bval(i))\n\t\te.SetTTL(int64(i))\n\t\te.SetTimestamp(time.Now().UnixNano())\n\t\thkey := xxhash.Sum64([]byte(e.Key()))\n\t\terr := s.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\t}\n}\n\nfunc TestRamBlock_Get(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\ttimestamp := time.Now().UnixNano()\n\tfor i := 0; i < 100; i++ {\n\t\te := 
entry.New()\n\t\te.SetKey(bkey(i))\n\t\te.SetTTL(int64(i))\n\t\te.SetValue(bval(i))\n\t\te.SetTimestamp(timestamp)\n\t\thkey := xxhash.Sum64([]byte(e.Key()))\n\t\terr := s.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\n\tfor i := 0; i < 100; i++ {\n\t\thkey := xxhash.Sum64([]byte(bkey(i)))\n\t\te, err := s.Get(hkey)\n\t\trequire.NoError(t, err)\n\n\t\trequire.Equal(t, bkey(i), e.Key())\n\t\trequire.Equal(t, int64(i), e.TTL())\n\t\trequire.Equal(t, bval(i), e.Value())\n\t\trequire.Equal(t, timestamp, e.Timestamp())\n\t}\n}\n\nfunc TestRamBlock_Delete(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\tfor i := 0; i < 100; i++ {\n\t\te := entry.New()\n\t\te.SetKey(bkey(i))\n\t\te.SetTTL(int64(i))\n\t\te.SetValue(bval(i))\n\t\te.SetTimestamp(time.Now().UnixNano())\n\t\thkey := xxhash.Sum64([]byte(e.Key()))\n\t\terr := s.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\n\tgarbage := make(map[int]uint64)\n\tfor i, tb := range s.(*RamBlock).tables {\n\t\ts := tb.Stats()\n\t\tgarbage[i] = s.Inuse\n\t}\n\n\tfor i := 0; i < 100; i++ {\n\t\thkey := xxhash.Sum64([]byte(bkey(i)))\n\t\terr := s.Delete(hkey)\n\t\trequire.NoError(t, err)\n\n\t\t_, err = s.Get(hkey)\n\t\trequire.ErrorIs(t, err, storage.ErrKeyNotFound)\n\t}\n\n\tfor i, tb := range s.(*RamBlock).tables {\n\t\ts := tb.Stats()\n\t\trequire.Equal(t, uint64(0), s.Inuse)\n\t\trequire.Equal(t, 0, s.Length)\n\t\trequire.Equal(t, garbage[i], s.Garbage)\n\t}\n}\n\nfunc TestRamBlock_ExportImport(t *testing.T) {\n\ttimestamp := time.Now().UnixNano()\n\ts := testRamBlock(t, nil)\n\n\tfor i := 0; i < 1000; i++ {\n\t\te := entry.New()\n\t\te.SetKey(bkey(i))\n\t\te.SetTTL(int64(i))\n\t\te.SetValue(bval(i))\n\t\te.SetTimestamp(timestamp)\n\t\thkey := xxhash.Sum64([]byte(e.Key()))\n\t\terr := s.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\n\tfresh := testRamBlock(t, nil)\n\n\tti := s.TransferIterator()\n\tfor ti.Next() {\n\t\tdata, index, err := ti.Export()\n\t\trequire.NoError(t, err)\n\n\t\terr = fresh.Import(data, func(u 
uint64, e storage.Entry) error {\n\t\t\treturn fresh.Put(u, e)\n\t\t})\n\t\trequire.NoError(t, err)\n\n\t\terr = ti.Drop(index)\n\t\trequire.NoError(t, err)\n\t}\n\n\t_, _, err := ti.Export()\n\trequire.ErrorIs(t, err, io.EOF)\n\n\tfor i := 0; i < 1000; i++ {\n\t\thkey := xxhash.Sum64([]byte(bkey(i)))\n\t\te, err := fresh.Get(hkey)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, bkey(i), e.Key())\n\t\trequire.Equal(t, int64(i), e.TTL())\n\t\trequire.Equal(t, bval(i), e.Value())\n\t\trequire.Equal(t, timestamp, e.Timestamp())\n\t}\n}\n\nfunc TestRamBlock_Stats_Length(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\tfor i := 0; i < 100; i++ {\n\t\te := entry.New()\n\t\te.SetKey(bkey(i))\n\t\te.SetTTL(int64(i))\n\t\te.SetValue(bval(i))\n\t\thkey := xxhash.Sum64([]byte(e.Key()))\n\t\terr := s.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\n\trequire.Equal(t, 100, s.Stats().Length)\n}\n\nfunc TestRamBlock_Range(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\thkeys := make(map[uint64]struct{})\n\tfor i := 0; i < 100; i++ {\n\t\te := entry.New()\n\t\te.SetKey(bkey(i))\n\t\te.SetTTL(int64(i))\n\t\te.SetValue(bval(i))\n\t\te.SetTimestamp(time.Now().UnixNano())\n\t\thkey := xxhash.Sum64([]byte(e.Key()))\n\t\terr := s.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\n\t\thkeys[hkey] = struct{}{}\n\t}\n\n\ts.Range(func(hkey uint64, entry storage.Entry) bool {\n\t\t_, ok := hkeys[hkey]\n\t\trequire.Truef(t, ok, \"Invalid hkey: %d\", hkey)\n\t\treturn true\n\t})\n}\n\nfunc TestRamBlock_Check(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\thkeys := make(map[uint64]struct{})\n\tfor i := 0; i < 100; i++ {\n\t\te := entry.New()\n\t\te.SetKey(bkey(i))\n\t\te.SetTTL(int64(i))\n\t\te.SetValue(bval(i))\n\t\te.SetTimestamp(time.Now().UnixNano())\n\t\thkey := xxhash.Sum64([]byte(e.Key()))\n\t\terr := s.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\n\t\thkeys[hkey] = struct{}{}\n\t}\n\n\tfor hkey := range hkeys {\n\t\trequire.Truef(t, s.Check(hkey), \"hkey could not be found: %d\", 
hkey)\n\t}\n}\n\nfunc TestRamBlock_UpdateTTL(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\tfor i := 0; i < 100; i++ {\n\t\te := entry.New()\n\t\te.SetKey(bkey(i))\n\t\te.SetValue(bval(i))\n\t\te.SetTimestamp(time.Now().UnixNano())\n\t\thkey := xxhash.Sum64([]byte(e.Key()))\n\t\terr := s.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\n\tfor i := 0; i < 100; i++ {\n\t\te := entry.New()\n\t\te.SetKey(bkey(i))\n\t\te.SetTTL(10)\n\t\te.SetTimestamp(time.Now().UnixNano())\n\t\thkey := xxhash.Sum64([]byte(e.Key()))\n\t\terr := s.UpdateTTL(hkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\n\tfor i := 0; i < 100; i++ {\n\t\thkey := xxhash.Sum64([]byte(bkey(i)))\n\t\te, err := s.Get(hkey)\n\t\trequire.NoError(t, err)\n\n\t\tif e.Key() != bkey(i) {\n\t\t\tt.Fatalf(\"Expected key: %s. Got %s\", bkey(i), e.Key())\n\t\t}\n\t\tif e.TTL() != 10 {\n\t\t\tt.Fatalf(\"Expected ttl: %d. Got %v\", i, e.TTL())\n\t\t}\n\t}\n}\n\nfunc TestRamBlock_GetKey(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\te := entry.New()\n\te.SetKey(bkey(1))\n\te.SetTTL(int64(1))\n\te.SetValue(bval(1))\n\thkey := xxhash.Sum64([]byte(e.Key()))\n\terr := s.Put(hkey, e)\n\trequire.NoError(t, err)\n\n\tkey, err := s.GetKey(hkey)\n\trequire.NoError(t, err)\n\n\tif key != bkey(1) {\n\t\tt.Fatalf(\"Expected %s. Got %v\", bkey(1), key)\n\t}\n}\n\nfunc TestRamBlock_PutRawGetRaw(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\tvalue := []byte(\"value\")\n\thkey := xxhash.Sum64([]byte(\"key\"))\n\terr := s.PutRaw(hkey, value)\n\trequire.NoError(t, err)\n\n\trawval, err := s.GetRaw(hkey)\n\trequire.NoError(t, err)\n\n\tif bytes.Equal(value, rawval) {\n\t\tt.Fatalf(\"Expected %s. 
Got %v\", value, rawval)\n\t}\n}\n\nfunc TestRamBlock_GetTTL(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\te := entry.New()\n\te.SetKey(bkey(1))\n\te.SetTTL(int64(1))\n\te.SetValue(bval(1))\n\n\thkey := xxhash.Sum64([]byte(e.Key()))\n\terr := s.Put(hkey, e)\n\trequire.NoError(t, err)\n\n\tttl, err := s.GetTTL(hkey)\n\trequire.NoError(t, err)\n\n\tif ttl != e.TTL() {\n\t\tt.Fatalf(\"Expected TTL %d. Got %d\", ttl, e.TTL())\n\t}\n}\n\nfunc TestRamBlock_GetLastAccess(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\te := entry.New()\n\te.SetKey(bkey(1))\n\te.SetTTL(int64(1))\n\te.SetValue(bval(1))\n\n\thkey := xxhash.Sum64([]byte(e.Key()))\n\terr := s.Put(hkey, e)\n\trequire.NoError(t, err)\n\n\tlastAccess, err := s.GetLastAccess(hkey)\n\trequire.NoError(t, err)\n\trequire.NotEqual(t, 0, lastAccess)\n}\n\nfunc TestRamBlock_Fork(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\ttimestamp := time.Now().UnixNano()\n\tfor i := 0; i < 10; i++ {\n\t\te := entry.New()\n\t\te.SetKey(bkey(i))\n\t\te.SetTTL(int64(i))\n\t\te.SetValue(bval(i))\n\t\te.SetTimestamp(timestamp)\n\t\thkey := xxhash.Sum64([]byte(e.Key()))\n\t\terr := s.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\n\tchild, err := s.Fork(nil)\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < 100; i++ {\n\t\thkey := xxhash.Sum64([]byte(bkey(i)))\n\t\t_, err = child.Get(hkey)\n\t\tif !errors.Is(err, storage.ErrKeyNotFound) {\n\t\t\tt.Fatalf(\"Expected storage.ErrKeyNotFound. Got %v\", err)\n\t\t}\n\t}\n\n\tstats := child.Stats()\n\tif uint64(stats.Allocated) != defaultTableSize {\n\t\tt.Fatalf(\"Expected Stats.Allocated: %d. Got: %d\", defaultTableSize, stats.Allocated)\n\t}\n\n\tif stats.Inuse != 0 {\n\t\tt.Fatalf(\"Expected Stats.Inuse: 0. Got: %d\", stats.Inuse)\n\t}\n\n\tif stats.Garbage != 0 {\n\t\tt.Fatalf(\"Expected Stats.Garbage: 0. Got: %d\", stats.Garbage)\n\t}\n\n\tif stats.Length != 0 {\n\t\tt.Fatalf(\"Expected Stats.Length: 0. 
Got: %d\", stats.Length)\n\t}\n\n\tif stats.NumTables != 1 {\n\t\tt.Fatalf(\"Expected Stats.NumTables: 1. Got: %d\", stats.NumTables)\n\t}\n}\n\nfunc TestRamBlock_StateChange(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\ttimestamp := time.Now().UnixNano()\n\t// Current free space is 1 MB. Trigger a compaction operation.\n\tfor i := 0; i < 100000; i++ {\n\t\te := entry.New()\n\t\te.SetKey(bkey(i))\n\t\te.SetTTL(int64(i))\n\t\te.SetValue([]byte(fmt.Sprintf(\"%01000d\", i)))\n\t\te.SetTTL(timestamp)\n\t\thkey := xxhash.Sum64([]byte(e.Key()))\n\t\terr := s.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\n\tfor i, tb := range s.(*RamBlock).tables {\n\t\tif tb.State() == table.ReadWriteState {\n\t\t\trequire.Equalf(t, len(s.(*RamBlock).tables)-1, i, \"Writable table has to be the latest table\")\n\t\t} else if tb.State() == table.ReadOnlyState {\n\t\t\trequire.True(t, i < len(s.(*RamBlock).tables)-1)\n\t\t}\n\t}\n}\n\nfunc TestRamBlock_NewEntry(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\ti := s.NewEntry()\n\t_, ok := i.(*entry.Entry)\n\trequire.True(t, ok)\n}\n\nfunc TestRamBlock_Name(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\trequire.Equal(t, \"ramblock\", s.Name())\n}\n\nfunc TestRamBlock_CloseDestroy(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\trequire.NoError(t, s.Close())\n\trequire.NoError(t, s.Destroy())\n}\n\nfunc TestStorage_Scan(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\tfor i := 0; i < 1000000; i++ {\n\t\te := entry.New()\n\t\te.SetKey(bkey(i))\n\t\te.SetTTL(int64(i))\n\t\te.SetValue(bval(i))\n\t\te.SetTimestamp(time.Now().UnixNano())\n\t\thkey := xxhash.Sum64([]byte(e.Key()))\n\t\terr := s.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\n\tvar (\n\t\tcount  int\n\t\tcursor uint64\n\t\terr    error\n\t)\n\tk := s.(*RamBlock)\n\tfor {\n\t\tcursor, err = k.Scan(cursor, 10, func(e storage.Entry) bool {\n\t\t\tcount++\n\t\t\treturn true\n\t\t})\n\t\trequire.NoError(t, err)\n\t\tif cursor == 0 
{\n\t\t\tbreak\n\t\t}\n\t}\n\n\trequire.Equal(t, 1000000, count)\n}\n\nfunc TestStorage_ScanRegexMatch(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\tvar key string\n\tfor i := 0; i < 1000000; i++ {\n\t\tif i%2 == 0 {\n\t\t\tkey = \"even:\" + strconv.Itoa(i)\n\t\t} else {\n\t\t\tkey = \"odd:\" + strconv.Itoa(i)\n\t\t}\n\n\t\te := entry.New()\n\t\te.SetKey(key)\n\t\te.SetTTL(int64(i))\n\t\te.SetValue(bval(i))\n\t\te.SetTimestamp(time.Now().UnixNano())\n\t\thkey := xxhash.Sum64([]byte(e.Key()))\n\t\terr := s.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\n\tvar (\n\t\tcount  int\n\t\tcursor uint64\n\t\terr    error\n\t)\n\tk := s.(*RamBlock)\n\tfor {\n\t\tcursor, err = k.ScanRegexMatch(cursor, \"even:\", 10, func(entry storage.Entry) bool {\n\t\t\tcount++\n\t\t\treturn true\n\t\t})\n\t\trequire.NoError(t, err)\n\t\tif cursor == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\trequire.Equal(t, 500000, count)\n}\n\nfunc TestStorage_ScanRegexMatch_OnlyOneEntry(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\tfor i := 0; i < 100; i++ {\n\t\te := entry.New()\n\t\te.SetKey(bkey(i))\n\t\te.SetTTL(int64(i))\n\t\te.SetValue(bval(i))\n\t\te.SetTimestamp(time.Now().UnixNano())\n\t\thkey := xxhash.Sum64([]byte(e.Key()))\n\t\terr := s.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\n\te := entry.New()\n\te.SetKey(\"even:200\")\n\te.SetTTL(123123)\n\te.SetValue([]byte(\"my-value\"))\n\te.SetTimestamp(time.Now().UnixNano())\n\thkey := xxhash.Sum64([]byte(e.Key()))\n\terr := s.Put(hkey, e)\n\trequire.NoError(t, err)\n\n\tvar (\n\t\tnum    int\n\t\tcount  int\n\t\tcursor uint64\n\t)\n\tk := s.(*RamBlock)\n\tfor {\n\t\tnum += 1\n\t\tcursor, err = k.ScanRegexMatch(cursor, \"even:\", 10, func(entry storage.Entry) bool {\n\t\t\tcount++\n\t\t\trequire.Equal(t, \"even:200\", e.Key())\n\t\t\trequire.Equal(t, \"my-value\", string(e.Value()))\n\t\t\treturn true\n\t\t})\n\t\trequire.NoError(t, err)\n\t\tif cursor == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\trequire.Equal(t, 1, num)\n\trequire.Equal(t, 1, 
count)\n}\n\nfunc TestStorage_Scan_NonContiguousCoefficients(t *testing.T) {\n\t// Use a small tableSize so that multiple tables are created quickly.\n\tc := DefaultConfig()\n\tc.Add(\"tableSize\", 1024)\n\ts := testRamBlock(t, c)\n\tk := s.(*RamBlock)\n\n\t// Insert enough entries to create several tables (at least 4).\n\tfor i := 0; i < 200; i++ {\n\t\te := entry.New()\n\t\te.SetKey(bkey(i))\n\t\te.SetValue(bval(i))\n\t\te.SetTTL(int64(i))\n\t\te.SetTimestamp(time.Now().UnixNano())\n\t\thkey := xxhash.Sum64([]byte(e.Key()))\n\t\terr := s.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\n\trequire.Greater(t, len(k.tables), 3, \"need at least 4 tables for this test\")\n\n\t// Count entries per table before deletion.\n\ttotalBefore := 0\n\tfor _, tbl := range k.tables {\n\t\ttotalBefore += tbl.Stats().Length\n\t}\n\n\t// Pick a middle table to delete (simulate compaction gap).\n\t// Find a table that is not the first or last and has entries.\n\tvar deletedTable *table.Table\n\tfor _, tbl := range k.tables[1 : len(k.tables)-1] {\n\t\tif tbl.Stats().Length > 0 {\n\t\t\tdeletedTable = tbl\n\t\t\tbreak\n\t\t}\n\t}\n\trequire.NotNil(t, deletedTable, \"could not find a middle table to delete\")\n\n\tdeletedCf := deletedTable.Coefficient()\n\tdeletedCount := deletedTable.Stats().Length\n\n\t// Remove the table from tablesByCoefficient to create a gap (like compaction does).\n\tdelete(k.tablesByCoefficient, deletedCf)\n\n\t// Remove from tables slice as well.\n\tfor i, tbl := range k.tables {\n\t\tif tbl == deletedTable {\n\t\t\tk.tables = append(k.tables[:i], k.tables[i+1:]...)\n\t\t\tbreak\n\t\t}\n\t}\n\n\texpectedCount := totalBefore - deletedCount\n\n\t// Scan all remaining entries.\n\tvar (\n\t\tscannedCount int\n\t\tcursor       uint64\n\t\terr          error\n\t)\n\tfor {\n\t\tcursor, err = k.Scan(cursor, 10, func(e storage.Entry) bool {\n\t\t\tscannedCount++\n\t\t\treturn true\n\t\t})\n\t\trequire.NoError(t, err)\n\t\tif cursor == 0 
{\n\t\t\tbreak\n\t\t}\n\t}\n\n\trequire.Equal(t, expectedCount, scannedCount,\n\t\t\"scan should find all entries in remaining tables after coefficient gap\")\n}\n\nfunc TestRamBlock_Put_ErrEntryTooLarge(t *testing.T) {\n\tc := DefaultConfig()\n\tc.Add(\"tableSize\", 1024)\n\ts := testRamBlock(t, c)\n\tvalue := make([]byte, 2048)\n\te := entry.New()\n\te.SetKey(\"key\")\n\te.SetValue(value)\n\te.SetTTL(10)\n\te.SetTimestamp(time.Now().UnixNano())\n\thkey := xxhash.Sum64([]byte(e.Key()))\n\n\terr := s.Put(hkey, e)\n\trequire.ErrorIs(t, err, storage.ErrEntryTooLarge)\n}\n\nfunc TestPrepareTableSize_NegativeValues(t *testing.T) {\n\tnegativeTests := []struct {\n\t\tname string\n\t\traw  interface{}\n\t}{\n\t\t{\"int\", int(-1)},\n\t\t{\"int8\", int8(-1)},\n\t\t{\"int16\", int16(-1)},\n\t\t{\"int32\", int32(-1)},\n\t\t{\"int64\", int64(-1)},\n\t}\n\n\tfor _, tt := range negativeTests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\t_, err := prepareTableSize(tt.raw)\n\t\t\trequire.Error(t, err)\n\t\t\trequire.Contains(t, err.Error(), \"tableSize cannot be negative\")\n\t\t})\n\t}\n}\n\nfunc TestPrepareTableSize_ValidValues(t *testing.T) {\n\ttests := []struct {\n\t\tname     string\n\t\traw      interface{}\n\t\texpected uint64\n\t}{\n\t\t{\"uint64\", uint64(1024), 1024},\n\t\t{\"uint32\", uint32(1024), 1024},\n\t\t{\"uint\", uint(1024), 1024},\n\t\t{\"int\", int(1024), 1024},\n\t\t{\"int64\", int64(1024), 1024},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tsize, err := prepareTableSize(tt.raw)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, tt.expected, size)\n\t\t})\n\t}\n}\n\nfunc TestPrepareTableSize_InvalidType(t *testing.T) {\n\t_, err := prepareTableSize(\"invalid\")\n\trequire.Error(t, err)\n\trequire.Contains(t, err.Error(), \"invalid type for tableSize\")\n}\n\nfunc TestRamBlock_New_NegativeTableSize(t *testing.T) {\n\tc := storage.NewConfig(nil)\n\tc.Add(\"tableSize\", int(-1))\n\tc.Add(\"maxIdleTableTimeout\", 
defaultMaxIdleTableTimeout)\n\t_, err := New(c)\n\trequire.Error(t, err)\n\trequire.Contains(t, err.Error(), \"tableSize cannot be negative\")\n}\n\nfunc TestRamBlock_Start_NilConfig(t *testing.T) {\n\tkv, err := New(nil)\n\trequire.NoError(t, err)\n\n\tkv.SetConfig(nil)\n\terr = kv.Start()\n\trequire.Error(t, err)\n\trequire.Equal(t, \"config cannot be nil\", err.Error())\n}\n\nfunc TestRamBlock_PutRaw_ErrEntryTooLarge(t *testing.T) {\n\tc := DefaultConfig()\n\tc.Add(\"tableSize\", 1024)\n\ts := testRamBlock(t, c)\n\n\tvalue := make([]byte, 2048)\n\thkey := xxhash.Sum64([]byte(\"key\"))\n\terr := s.PutRaw(hkey, value)\n\trequire.ErrorIs(t, err, storage.ErrEntryTooLarge)\n}\n\nfunc TestRamBlock_GetRaw_KeyNotFound(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\thkey := xxhash.Sum64([]byte(\"nonexistent\"))\n\traw, err := s.GetRaw(hkey)\n\trequire.ErrorIs(t, err, storage.ErrKeyNotFound)\n\trequire.Nil(t, raw)\n}\n\nfunc TestRamBlock_GetTTL_KeyNotFound(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\thkey := xxhash.Sum64([]byte(\"nonexistent\"))\n\tttl, err := s.GetTTL(hkey)\n\trequire.ErrorIs(t, err, storage.ErrKeyNotFound)\n\trequire.Equal(t, int64(0), ttl)\n}\n\nfunc TestRamBlock_GetLastAccess_KeyNotFound(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\thkey := xxhash.Sum64([]byte(\"nonexistent\"))\n\tlastAccess, err := s.GetLastAccess(hkey)\n\trequire.ErrorIs(t, err, storage.ErrKeyNotFound)\n\trequire.Equal(t, int64(0), lastAccess)\n}\n\nfunc TestRamBlock_GetKey_KeyNotFound(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\thkey := xxhash.Sum64([]byte(\"nonexistent\"))\n\tkey, err := s.GetKey(hkey)\n\trequire.ErrorIs(t, err, storage.ErrKeyNotFound)\n\trequire.Equal(t, \"\", key)\n}\n\nfunc TestRamBlock_Delete_NonExistentKey(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\thkey := xxhash.Sum64([]byte(\"nonexistent\"))\n\terr := s.Delete(hkey)\n\trequire.NoError(t, err)\n}\n\nfunc TestRamBlock_UpdateTTL_KeyNotFound(t *testing.T) {\n\ts := testRamBlock(t, 
nil)\n\n\te := entry.New()\n\te.SetTTL(100)\n\te.SetTimestamp(time.Now().UnixNano())\n\n\thkey := xxhash.Sum64([]byte(\"nonexistent\"))\n\terr := s.UpdateTTL(hkey, e)\n\trequire.ErrorIs(t, err, storage.ErrKeyNotFound)\n}\n\nfunc TestRamBlock_Check_KeyNotFound(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\thkey := xxhash.Sum64([]byte(\"nonexistent\"))\n\trequire.False(t, s.Check(hkey))\n}\n\nfunc TestRamBlock_RangeHKey(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\texpected := make(map[uint64]struct{})\n\tfor i := 0; i < 100; i++ {\n\t\te := entry.New()\n\t\te.SetKey(bkey(i))\n\t\te.SetValue(bval(i))\n\t\thkey := xxhash.Sum64([]byte(e.Key()))\n\t\terr := s.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\t\texpected[hkey] = struct{}{}\n\t}\n\n\tcollected := make(map[uint64]struct{})\n\ts.RangeHKey(func(hkey uint64) bool {\n\t\tcollected[hkey] = struct{}{}\n\t\treturn true\n\t})\n\n\trequire.Equal(t, expected, collected)\n}\n\nfunc TestRamBlock_SetConfig(t *testing.T) {\n\tkv, err := New(nil)\n\trequire.NoError(t, err)\n\n\tnewConfig := storage.NewConfig(nil)\n\tnewConfig.Add(\"tableSize\", uint64(2048))\n\tnewConfig.Add(\"maxIdleTableTimeout\", defaultMaxIdleTableTimeout)\n\n\tkv.SetConfig(newConfig)\n\n\traw, err := kv.config.Get(\"tableSize\")\n\trequire.NoError(t, err)\n\trequire.Equal(t, uint64(2048), raw)\n}\n\nfunc TestRamBlock_Fork_CustomConfig(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\tcustomConfig := DefaultConfig()\n\tcustomConfig.Add(\"tableSize\", uint64(2048))\n\n\tchild, err := s.Fork(customConfig)\n\trequire.NoError(t, err)\n\n\tchildKV := child.(*RamBlock)\n\trequire.Equal(t, uint64(2048), childKV.tableSize)\n\trequire.Equal(t, 1, len(childKV.tables))\n\trequire.Equal(t, 0, child.Stats().Length)\n}\n\nfunc TestRamBlock_MakeTable_RecycledTableReuse(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\ttimestamp := time.Now().UnixNano()\n\t// Insert entries with large values to fill multiple tables (default 1MB each).\n\tfor i := 0; i < 1500; i++ 
{\n\t\te := entry.New()\n\t\te.SetKey(bkey(i))\n\t\te.SetTTL(int64(i))\n\t\te.SetValue([]byte(fmt.Sprintf(\"%01000d\", i)))\n\t\te.SetTimestamp(timestamp)\n\t\thkey := xxhash.Sum64([]byte(e.Key()))\n\t\terr := s.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\n\t// Delete enough entries to exceed the 40% garbage ratio on a table.\n\tfor i := 0; i < 750; i++ {\n\t\thkey := xxhash.Sum64([]byte(bkey(i)))\n\t\terr := s.Delete(hkey)\n\t\trequire.NoError(t, err)\n\t}\n\n\t// Run compaction until done to produce RecycledState tables.\n\tfor {\n\t\tdone, err := s.Compaction()\n\t\trequire.NoError(t, err)\n\t\tif done {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tk := s.(*RamBlock)\n\n\t// Verify at least one recycled table exists.\n\tvar recycledFound bool\n\tfor _, tb := range k.tables {\n\t\tif tb.State() == table.RecycledState {\n\t\t\trecycledFound = true\n\t\t\tbreak\n\t\t}\n\t}\n\trequire.True(t, recycledFound, \"Expected at least one RecycledState table after compaction\")\n\n\ttableCountBefore := len(k.tables)\n\n\t// Fill the current writable table to trigger makeTable via putWithRetry.\n\tstartIdx := 2000\n\tfor i := startIdx; i < startIdx+1500; i++ {\n\t\te := entry.New()\n\t\te.SetKey(bkey(i))\n\t\te.SetTTL(int64(i))\n\t\te.SetValue([]byte(fmt.Sprintf(\"%01000d\", i)))\n\t\te.SetTimestamp(timestamp)\n\t\thkey := xxhash.Sum64([]byte(e.Key()))\n\t\terr := s.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\n\t// The recycled table should have been reused: no RecycledState tables remain.\n\tfor _, tb := range k.tables {\n\t\trequire.NotEqual(t, table.RecycledState, tb.State(),\n\t\t\t\"Expected no RecycledState tables after reuse\")\n\t}\n\n\t// The last table must be writable.\n\tlastTable := k.tables[len(k.tables)-1]\n\trequire.Equal(t, table.ReadWriteState, lastTable.State())\n\n\t// Table count should not have grown beyond what's needed (recycled table was reused).\n\trequire.LessOrEqual(t, len(k.tables), tableCountBefore+1,\n\t\t\"Expected recycled table reuse to limit 
table growth\")\n}\n\nfunc TestRamBlock_EvictTable_PutRawError(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\ttimestamp := time.Now().UnixNano()\n\t// Insert entries with large values to fill multiple tables.\n\tfor i := 0; i < 1500; i++ {\n\t\te := entry.New()\n\t\te.SetKey(bkey(i))\n\t\te.SetTTL(int64(i))\n\t\te.SetValue([]byte(fmt.Sprintf(\"%01000d\", i)))\n\t\te.SetTimestamp(timestamp)\n\t\thkey := xxhash.Sum64([]byte(e.Key()))\n\t\terr := s.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\n\t// Delete enough entries to exceed the 40% garbage ratio on a table.\n\tfor i := 0; i < 750; i++ {\n\t\thkey := xxhash.Sum64([]byte(bkey(i)))\n\t\terr := s.Delete(hkey)\n\t\trequire.NoError(t, err)\n\t}\n\n\tk := s.(*RamBlock)\n\n\t// Shrink tableSize to force PutRaw to return ErrEntryTooLarge during eviction.\n\tk.tableSize = 1\n\n\tdone, err := k.Compaction()\n\trequire.False(t, done)\n\trequire.Error(t, err)\n\trequire.ErrorIs(t, err, storage.ErrEntryTooLarge)\n}\n\nfunc TestRamBlock_Compaction_NoTables(t *testing.T) {\n\tk, err := New(nil)\n\trequire.NoError(t, err)\n\n\terr = k.Start()\n\trequire.NoError(t, err)\n\n\t// No tables exist — Compaction should return done=true with no error.\n\tdone, compactionErr := k.Compaction()\n\trequire.NoError(t, compactionErr)\n\trequire.True(t, done)\n}\n\nfunc TestRamBlock_IsCompactionOK_ExactThreshold(t *testing.T) {\n\t// Each entry: key=1 byte + value=70 bytes + metadata=29 bytes = 100 bytes.\n\t// Table size 1000 → 9 entries fit. 
Deleting N entries → garbage = N*100.\n\t// maxGarbageRatio = 0.40, threshold = 1000 * 0.40 = 400.\n\n\ttests := []struct {\n\t\tname        string\n\t\tdeleteCount int\n\t\texpected    bool\n\t}{\n\t\t{\"ExactBoundary\", 4, true},  // garbage=400, ratio=0.40 → >=0.40 → true\n\t\t{\"BelowBoundary\", 3, false}, // garbage=300, ratio=0.30 → <0.40 → false\n\t\t{\"AboveBoundary\", 5, true},  // garbage=500, ratio=0.50 → >=0.40 → true\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tconfig := DefaultConfig()\n\t\t\tconfig.Add(\"tableSize\", uint64(1000))\n\t\t\ts := testRamBlock(t, config)\n\n\t\t\ttimestamp := time.Now().UnixNano()\n\t\t\t// Insert 9 entries of exactly 100 bytes each.\n\t\t\tfor i := 0; i < 9; i++ {\n\t\t\t\te := entry.New()\n\t\t\t\te.SetKey(fmt.Sprintf(\"%01d\", i))            // 1-byte key\n\t\t\t\te.SetValue([]byte(fmt.Sprintf(\"%070d\", i))) // 70-byte value\n\t\t\t\te.SetTimestamp(timestamp)\n\t\t\t\thkey := xxhash.Sum64([]byte(e.Key()))\n\t\t\t\terr := s.Put(hkey, e)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t}\n\n\t\t\t// Delete entries to reach desired garbage level.\n\t\t\tfor i := 0; i < tc.deleteCount; i++ {\n\t\t\t\thkey := xxhash.Sum64([]byte(fmt.Sprintf(\"%01d\", i)))\n\t\t\t\terr := s.Delete(hkey)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t}\n\n\t\t\tk := s.(*RamBlock)\n\t\t\ttb := k.tables[0]\n\n\t\t\tstats := tb.Stats()\n\t\t\trequire.Equal(t, uint64(1000), stats.Allocated)\n\t\t\trequire.Equal(t, uint64(tc.deleteCount*100), stats.Garbage,\n\t\t\t\t\"Expected garbage = %d\", tc.deleteCount*100)\n\n\t\t\tresult := k.isCompactionOK(tb)\n\t\t\trequire.Equal(t, tc.expected, result)\n\t\t})\n\t}\n}\n\nfunc TestTransferIterator_Drop_EmptyTables(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\n\t// Put a single entry so we have one table with data.\n\te := entry.New()\n\te.SetKey(bkey(0))\n\te.SetValue(bval(0))\n\te.SetTimestamp(time.Now().UnixNano())\n\thkey := xxhash.Sum64([]byte(e.Key()))\n\terr := s.Put(hkey, 
e)\n\trequire.NoError(t, err)\n\n\t// Drain all tables via Export + Drop.\n\tti := s.TransferIterator()\n\tfor ti.Next() {\n\t\t_, index, err := ti.Export()\n\t\trequire.NoError(t, err)\n\n\t\terr = ti.Drop(index)\n\t\trequire.NoError(t, err)\n\t}\n\n\t// Now tables slice is empty. Drop should return an error.\n\terr = ti.Drop(0)\n\trequire.Error(t, err)\n\trequire.Contains(t, err.Error(), \"there is no table to drop\")\n}\n\nfunc TestTransferIterator_Export_SkipsRecycledState(t *testing.T) {\n\ts := testRamBlock(t, nil)\n\tk := s.(*RamBlock)\n\n\t// Insert enough data to create at least 2 tables.\n\ttimestamp := time.Now().UnixNano()\n\tfor i := 0; i < 100000; i++ {\n\t\te := entry.New()\n\t\te.SetKey(bkey(i))\n\t\te.SetTTL(int64(i))\n\t\te.SetValue([]byte(fmt.Sprintf(\"%01000d\", i)))\n\t\te.SetTimestamp(timestamp)\n\t\thkey := xxhash.Sum64([]byte(e.Key()))\n\t\terr := s.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\trequire.Greater(t, len(k.tables), 1, \"need at least 2 tables for this test\")\n\n\t// Set the first table to RecycledState.\n\tk.tables[0].SetState(table.RecycledState)\n\n\tti := s.TransferIterator()\n\n\t// Export should skip the recycled table and return the next non-recycled one.\n\tdata, index, err := ti.Export()\n\trequire.NoError(t, err)\n\trequire.NotNil(t, data)\n\trequire.Greater(t, index, 0, \"Expected Export to skip index 0 (recycled table)\")\n\n\t// Now set ALL tables to RecycledState.\n\tfor _, tb := range k.tables {\n\t\ttb.SetState(table.RecycledState)\n\t}\n\n\t// Export should return io.EOF when all tables are recycled.\n\t_, _, err = ti.Export()\n\trequire.ErrorIs(t, err, io.EOF)\n}\n"
  },
  {
    "path": "internal/ramblock/table/pack.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage table\n\nimport (\n\t\"github.com/RoaringBitmap/roaring/roaring64\"\n\t\"github.com/vmihailenco/msgpack/v5\"\n)\n\n// Pack is the serializable representation of a Table. It is used by Encode and\n// Decode to transfer table data between nodes via msgpack serialization.\ntype Pack struct {\n\tOffset      uint64\n\tAllocated   uint64\n\tInuse       uint64\n\tGarbage     uint64\n\tRecycledAt  int64\n\tState       State\n\tHKeys       map[uint64]uint64\n\tOffsetIndex []byte\n\tMemory      []byte\n}\n\n// Encode serializes the given Table into a msgpack-encoded byte slice. 
Only the\n// active portion of the memory buffer (up to the current offset) is included.\nfunc Encode(t *Table) ([]byte, error) {\n\toffsetIndex, err := t.offsetIndex.MarshalBinary()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tp := Pack{\n\t\tOffset:      t.offset,\n\t\tAllocated:   t.allocated,\n\t\tInuse:       t.inuse,\n\t\tGarbage:     t.garbage,\n\t\tRecycledAt:  t.recycledAt,\n\t\tState:       t.state,\n\t\tHKeys:       t.hkeys,\n\t\tOffsetIndex: offsetIndex,\n\t}\n\tp.Memory = make([]byte, t.offset)\n\tcopy(p.Memory, t.memory[:t.offset])\n\n\treturn msgpack.Marshal(p)\n}\n\n// Decode deserializes a msgpack-encoded byte slice into a new Table, restoring\n// all entries, metadata, and the offset index.\nfunc Decode(data []byte) (*Table, error) {\n\tp := &Pack{}\n\terr := msgpack.Unmarshal(data, p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trb := roaring64.New()\n\terr = rb.UnmarshalBinary(p.OffsetIndex)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tt := New(p.Allocated)\n\tt.offset = p.Offset\n\tt.inuse = p.Inuse\n\tt.garbage = p.Garbage\n\tt.recycledAt = p.RecycledAt\n\tt.state = p.State\n\tt.hkeys = p.HKeys\n\tt.offsetIndex = rb\n\n\tcopy(t.memory[:t.offset], p.Memory)\n\n\treturn t, nil\n}\n"
  },
  {
    "path": "internal/ramblock/table/pack_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage table\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/cespare/xxhash/v2\"\n\t\"github.com/olric-data/olric/internal/ramblock/entry\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc bkey(i int) string {\n\treturn fmt.Sprintf(\"%09d\", i)\n}\n\nfunc bval(i int) []byte {\n\treturn []byte(fmt.Sprintf(\"%025d\", i))\n}\n\nfunc TestTable_Pack_Decode_CorruptData(t *testing.T) {\n\t_, err := Decode([]byte(\"this is not valid msgpack data\"))\n\trequire.Error(t, err)\n}\n\nfunc TestTable_Pack_EncodeDecode_GarbageAndRecycledAt(t *testing.T) {\n\tsize := uint64(1 << 16)\n\ttb := New(size)\n\n\tfor i := 0; i < 20; i++ {\n\t\te := entry.New()\n\t\te.SetKey(bkey(i))\n\t\te.SetValue(bval(i))\n\t\thkey := xxhash.Sum64([]byte(e.Key()))\n\t\terr := tb.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\n\t// Delete some entries to create garbage\n\tfor i := 0; i < 10; i++ {\n\t\thkey := xxhash.Sum64([]byte(bkey(i)))\n\t\terr := tb.Delete(hkey)\n\t\trequire.NoError(t, err)\n\t}\n\n\tstatsBefore := tb.Stats()\n\trequire.Greater(t, statsBefore.Garbage, uint64(0))\n\n\t// Reset to set recycledAt\n\ttb.Reset()\n\tstatsAfterReset := tb.Stats()\n\trequire.NotEqual(t, int64(0), statsAfterReset.RecycledAt)\n\n\t// Re-add some entries after reset to have garbage again\n\ttb.SetState(ReadWriteState)\n\tfor i := 100; i < 110; i++ {\n\t\te := 
entry.New()\n\t\te.SetKey(bkey(i))\n\t\te.SetValue(bval(i))\n\t\thkey := xxhash.Sum64([]byte(e.Key()))\n\t\terr := tb.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\t// Delete a few to accumulate garbage\n\tfor i := 100; i < 105; i++ {\n\t\thkey := xxhash.Sum64([]byte(bkey(i)))\n\t\terr := tb.Delete(hkey)\n\t\trequire.NoError(t, err)\n\t}\n\n\tstatsBeforeEncode := tb.Stats()\n\trequire.Greater(t, statsBeforeEncode.Garbage, uint64(0))\n\trequire.NotEqual(t, int64(0), statsBeforeEncode.RecycledAt)\n\n\tencoded, err := Encode(tb)\n\trequire.NoError(t, err)\n\n\tdecoded, err := Decode(encoded)\n\trequire.NoError(t, err)\n\n\tstatsAfterDecode := decoded.Stats()\n\trequire.Equal(t, statsBeforeEncode.Garbage, statsAfterDecode.Garbage)\n\trequire.Equal(t, statsBeforeEncode.RecycledAt, statsAfterDecode.RecycledAt)\n\trequire.Equal(t, statsBeforeEncode.Inuse, statsAfterDecode.Inuse)\n\trequire.Equal(t, statsBeforeEncode.Length, statsAfterDecode.Length)\n}\n\nfunc TestTable_Pack_EncodeDecode(t *testing.T) {\n\tsize := uint64(1 << 16)\n\ttb := New(size)\n\n\ttimestamp := time.Now().UnixNano()\n\tfor i := 0; i < 100; i++ {\n\t\te := entry.New()\n\t\te.SetKey(bkey(i))\n\t\te.SetTTL(int64(i))\n\t\te.SetValue(bval(i))\n\t\te.SetLastAccess(timestamp)\n\t\thkey := xxhash.Sum64([]byte(e.Key()))\n\t\terr := tb.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\n\tencoded, err := Encode(tb)\n\trequire.NoError(t, err)\n\n\tnewTable, err := Decode(encoded)\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < 100; i++ {\n\t\thkey := xxhash.Sum64([]byte(bkey(i)))\n\t\te, err := newTable.Get(hkey)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, e.Key(), bkey(i))\n\t\trequire.Equal(t, e.Value(), bval(i))\n\t\trequire.Equal(t, e.TTL(), int64(i))\n\t\trequire.NotEqual(t, timestamp, e.LastAccess())\n\t}\n\n}\n"
  },
  {
    "path": "internal/ramblock/table/table.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage table\n\nimport (\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/RoaringBitmap/roaring/roaring64\"\n\t\"github.com/olric-data/olric/internal/ramblock/entry\"\n\t\"github.com/olric-data/olric/pkg/storage\"\n\t\"github.com/pkg/errors\"\n)\n\nconst (\n\t// MaxKeyLength is the maximum allowed key size in bytes.\n\tMaxKeyLength = 256\n\n\t// MetadataLength is the fixed number of bytes used to store per-entry metadata\n\t// (TTL + Timestamp + LastAccess + ValueLength + KeyLength = 8+8+8+4+1 = 29).\n\tMetadataLength = 29\n)\n\n// State represents the operational state of a Table.\ntype State uint8\n\nconst (\n\t// ReadWriteState indicates the table accepts both reads and writes.\n\tReadWriteState = State(iota + 1)\n\n\t// ReadOnlyState indicates the table only accepts read operations.\n\tReadOnlyState\n\n\t// RecycledState indicates the table has been reset and is ready for reuse.\n\tRecycledState\n)\n\nvar (\n\t// ErrNotEnoughSpace is returned when the table's pre-allocated memory buffer\n\t// does not have enough room to store a new entry.\n\tErrNotEnoughSpace = errors.New(\"not enough space\")\n\n\t// ErrHKeyNotFound is returned when the given hash key does not exist in the table.\n\tErrHKeyNotFound = errors.New(\"hkey not found\")\n)\n\n// Stats holds memory usage statistics and metadata for a 
Table.\ntype Stats struct {\n\t// Allocated is the total size of the pre-allocated memory buffer in bytes.\n\tAllocated uint64\n\n\t// Inuse is the number of bytes currently occupied by active entries.\n\tInuse uint64\n\n\t// Garbage is the number of bytes occupied by deleted entries that have not been reclaimed.\n\tGarbage uint64\n\n\t// Length is the number of active entries in the table.\n\tLength int\n\n\t// RecycledAt is the UnixNano timestamp of the last Reset call, or zero if never recycled.\n\tRecycledAt int64\n}\n\n// Table is an in-memory key-value store backed by a pre-allocated byte slice.\n// Entries are written sequentially into the buffer using a compact binary layout:\n//\n//\tKEY-LENGTH(uint8) | KEY(bytes) | TTL(uint64) | TIMESTAMP(uint64) | LASTACCESS(uint64) | VALUE-LENGTH(uint32) | VALUE(bytes)\n//\n// A hash key (uint64) to offset mapping provides O(1) lookups. Deleted entries\n// are tracked as garbage but not reclaimed until the table is compacted or recycled.\ntype Table struct {\n\tlastAccessMtx sync.RWMutex\n\tcoefficient   uint64\n\toffset        uint64\n\tallocated     uint64\n\tinuse         uint64\n\tgarbage       uint64\n\trecycledAt    int64\n\tstate         State\n\thkeys         map[uint64]uint64\n\toffsetIndex   *roaring64.Bitmap\n\tmemory        []byte\n}\n\n// New creates a new Table with a pre-allocated memory buffer of the given size in bytes.\nfunc New(size uint64) *Table {\n\tt := &Table{\n\t\thkeys:       make(map[uint64]uint64),\n\t\tallocated:   size,\n\t\toffsetIndex: roaring64.New(),\n\t\tstate:       ReadWriteState,\n\t}\n\t//  From builtin.go:\n\t//\n\t//  The size specifies the length. The capacity of the slice is\n\t//\tequal to its length. A second integer argument may be provided to\n\t//\tspecify a different capacity; it must be no smaller than the\n\t//\tlength. 
For example, make([]int, 0, 10) allocates an underlying array\n\t//\tof size 10 and returns a slice of length 0 and capacity 10 that is\n\t//\tbacked by this underlying array.\n\tt.memory = make([]byte, size)\n\treturn t\n}\n\n// SetCoefficient sets the coefficient value used for load-balancing and distribution purposes.\nfunc (t *Table) SetCoefficient(cf uint64) {\n\tt.coefficient = cf\n}\n\n// Coefficient returns the current coefficient value of the table.\nfunc (t *Table) Coefficient() uint64 {\n\treturn t.coefficient\n}\n\n// SetState sets the operational state of the table.\nfunc (t *Table) SetState(s State) {\n\tt.state = s\n}\n\n// State returns the current operational state of the table.\nfunc (t *Table) State() State {\n\treturn t.state\n}\n\n// PutRaw stores pre-encoded raw bytes into the table under the given hash key.\n// It copies the value directly into the memory buffer without any metadata encoding.\n// Returns ErrNotEnoughSpace if the buffer cannot accommodate the value.\nfunc (t *Table) PutRaw(hkey uint64, value []byte) error {\n\t// Check empty space on the allocated memory area.\n\tinuse := uint64(len(value))\n\tif inuse+t.offset >= t.allocated {\n\t\treturn ErrNotEnoughSpace\n\t}\n\tt.hkeys[hkey] = t.offset\n\tt.offsetIndex.Add(t.offset)\n\tcopy(t.memory[t.offset:], value)\n\tt.inuse += inuse\n\tt.offset += inuse\n\treturn nil\n}\n\n// Put stores a storage.Entry into the table under the given hash key. It encodes\n// the entry's key, TTL, timestamp, last access time and value into the memory buffer\n// using the following binary layout:\n//\n//\tKEY-LENGTH(uint8) | KEY(bytes) | TTL(uint64) | TIMESTAMP(uint64) | LASTACCESS(uint64) | VALUE-LENGTH(uint32) | VALUE(bytes)\n//\n// If the hash key already exists, the previous entry is deleted first. 
Returns\n// ErrNotEnoughSpace if the buffer cannot accommodate the entry, or storage.ErrKeyTooLarge\n// if the key exceeds MaxKeyLength.\nfunc (t *Table) Put(hkey uint64, value storage.Entry) error {\n\tif len(value.Key()) >= MaxKeyLength {\n\t\treturn storage.ErrKeyTooLarge\n\t}\n\n\t// Check empty space on the allocated memory area.\n\n\t// TTL + Timestamp + LastAccess + + value-Length + key-Length\n\tinuse := uint64(len(value.Key()) + len(value.Value()) + MetadataLength)\n\tif inuse+t.offset >= t.allocated {\n\t\treturn ErrNotEnoughSpace\n\t}\n\n\t// If we already have the key, delete it.\n\terr := t.Delete(hkey)\n\tif errors.Is(err, ErrHKeyNotFound) {\n\t\terr = nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tt.hkeys[hkey] = t.offset\n\tt.offsetIndex.Add(t.offset)\n\tt.inuse += inuse\n\n\t// Set key length. It's 1 byte.\n\tklen := uint8(len(value.Key()))\n\tcopy(t.memory[t.offset:], []byte{klen})\n\tt.offset++\n\n\t// Set the key.\n\tcopy(t.memory[t.offset:], value.Key())\n\tt.offset += uint64(len(value.Key()))\n\n\t// Set the TTL. It's 8 bytes.\n\tbinary.BigEndian.PutUint64(t.memory[t.offset:], uint64(value.TTL()))\n\tt.offset += 8\n\n\t// Set the Timestamp. It's 8 bytes.\n\tbinary.BigEndian.PutUint64(t.memory[t.offset:], uint64(value.Timestamp()))\n\tt.offset += 8\n\n\t// Set the last access. It's 8 bytes.\n\tbinary.BigEndian.PutUint64(t.memory[t.offset:], uint64(time.Now().UnixNano()))\n\tt.offset += 8\n\n\t// Set the value length. 
It's 4 bytes.\n\tbinary.BigEndian.PutUint32(t.memory[t.offset:], uint32(len(value.Value())))\n\tt.offset += 4\n\n\t// Set the value.\n\tcopy(t.memory[t.offset:], value.Value())\n\tt.offset += uint64(len(value.Value()))\n\treturn nil\n}\n\n// GetRaw returns the raw byte representation of the entry stored under the given hash key.\n// The returned slice is a copy and includes the full binary-encoded entry (key, metadata and value).\n// Returns ErrHKeyNotFound if the hash key does not exist.\nfunc (t *Table) GetRaw(hkey uint64) ([]byte, error) {\n\toffset, ok := t.hkeys[hkey]\n\tif !ok {\n\t\treturn nil, ErrHKeyNotFound\n\t}\n\tstart, end := offset, offset\n\n\t// In-memory structure:\n\t// 1                 | klen       | 8           | 8                  | 8                  | 4                    | vlen\n\t// KEY-LENGTH(uint8) | KEY(bytes) | TTL(uint64) | TIMESTAMP(uint64)  | LASTACCESS(uint64) | VALUE-LENGTH(uint64) | VALUE(bytes)\n\tklen := uint64(t.memory[end])\n\tend++       // One byte to keep key length\n\tend += klen // key length\n\tend += 8    // TTL\n\tend += 8    // Timestamp\n\tend += 8    // LastAccess\n\n\tvlen := binary.BigEndian.Uint32(t.memory[end : end+4])\n\tend += 4            // 4 bytes to keep value length\n\tend += uint64(vlen) // value length\n\n\t// Create a copy of the requested data.\n\trawval := make([]byte, end-start)\n\tcopy(rawval, t.memory[start:end])\n\treturn rawval, nil\n}\n\n// getRawKey reads and returns the raw key bytes from the memory buffer at the given offset.\nfunc (t *Table) getRawKey(offset uint64) ([]byte, error) {\n\tklen := uint64(t.memory[offset])\n\toffset++\n\treturn t.memory[offset : offset+klen], nil\n}\n\n// GetRawKey returns the raw key bytes for the given hash key.\n// Returns ErrHKeyNotFound if the hash key does not exist.\nfunc (t *Table) GetRawKey(hkey uint64) ([]byte, error) {\n\toffset, ok := t.hkeys[hkey]\n\tif !ok {\n\t\treturn nil, ErrHKeyNotFound\n\t}\n\n\treturn t.getRawKey(offset)\n}\n\n// GetKey 
returns the key as a string for the given hash key.\n// Returns ErrHKeyNotFound if the hash key does not exist.\nfunc (t *Table) GetKey(hkey uint64) (string, error) {\n\traw, err := t.GetRawKey(hkey)\n\tif raw == nil {\n\t\treturn \"\", err\n\t}\n\treturn string(raw), err\n}\n\n// GetTTL returns the TTL value in nanoseconds for the given hash key.\n// Returns ErrHKeyNotFound if the hash key does not exist.\nfunc (t *Table) GetTTL(hkey uint64) (int64, error) {\n\toffset, ok := t.hkeys[hkey]\n\tif !ok {\n\t\treturn 0, ErrHKeyNotFound\n\t}\n\n\tklen := uint64(t.memory[offset])\n\toffset++\n\toffset += klen\n\n\treturn int64(binary.BigEndian.Uint64(t.memory[offset : offset+8])), nil\n}\n\n// GetLastAccess returns the last access timestamp in nanoseconds for the given hash key.\n// Returns ErrHKeyNotFound if the hash key does not exist.\nfunc (t *Table) GetLastAccess(hkey uint64) (int64, error) {\n\toffset, ok := t.hkeys[hkey]\n\tif !ok {\n\t\treturn 0, ErrHKeyNotFound\n\t}\n\n\tklen := uint64(t.memory[offset])\n\toffset++       // Key length\n\toffset += klen // Key's itself\n\toffset += 8    // TTL\n\toffset += 8    // Timestamp\n\n\treturn int64(binary.BigEndian.Uint64(t.memory[offset : offset+8])), nil\n}\n\n// get decodes a storage.Entry from the memory buffer at the given offset and updates\n// the entry's last access time to the current time. 
It is used internally by Scan methods.\nfunc (t *Table) get(offset uint64) storage.Entry {\n\te := &entry.Entry{}\n\t// In-memory structure:\n\t//\n\t// KEY-LENGTH(uint8) | KEY(bytes) | TTL(uint64) | TIMESTAMP(uint64) | LASTACCESS(uint64) | VALUE-LENGTH(uint32) | VALUE(bytes)\n\tklen := uint64(t.memory[offset])\n\toffset++\n\n\te.SetKey(string(t.memory[offset : offset+klen]))\n\toffset += klen\n\n\te.SetTTL(int64(binary.BigEndian.Uint64(t.memory[offset : offset+8])))\n\toffset += 8\n\n\te.SetTimestamp(int64(binary.BigEndian.Uint64(t.memory[offset : offset+8])))\n\toffset += 8\n\n\t// Every SCAN call updates the last access time. We have to serialize the access to that field.\n\tt.lastAccessMtx.RLock()\n\te.SetLastAccess(int64(binary.BigEndian.Uint64(t.memory[offset : offset+8])))\n\tt.lastAccessMtx.RUnlock()\n\n\t// Update the last access field\n\tlastAccess := uint64(time.Now().UnixNano())\n\tt.lastAccessMtx.Lock()\n\tbinary.BigEndian.PutUint64(t.memory[offset:], lastAccess)\n\tt.lastAccessMtx.Unlock()\n\toffset += 8\n\n\tvlen := binary.BigEndian.Uint32(t.memory[offset : offset+4])\n\toffset += 4\n\te.SetValue(t.memory[offset : offset+uint64(vlen)])\n\treturn e\n}\n\n// Get retrieves the storage.Entry for the given hash key and updates the entry's\n// last access time. Returns ErrHKeyNotFound if the hash key does not exist.\nfunc (t *Table) Get(hkey uint64) (storage.Entry, error) {\n\toffset, ok := t.hkeys[hkey]\n\tif !ok {\n\t\treturn nil, ErrHKeyNotFound\n\t}\n\n\treturn t.get(offset), nil\n}\n\n// Delete removes the entry associated with the given hash key from the table.\n// The occupied memory is marked as garbage but not reclaimed. 
Returns\n// ErrHKeyNotFound if the hash key does not exist.\nfunc (t *Table) Delete(hkey uint64) error {\n\toffset, ok := t.hkeys[hkey]\n\tif !ok {\n\t\t// Try the previous tables.\n\t\treturn ErrHKeyNotFound\n\t}\n\tvar garbage uint64\n\n\t// key, 1 byte for key size, klen for key's actual length.\n\tklen := uint64(t.memory[offset])\n\n\t// Delete the offset from offsetIndex\n\tt.offsetIndex.Remove(offset)\n\n\toffset += 1 + klen\n\tgarbage += 1 + klen\n\n\t// TTL, skip it.\n\toffset += 8\n\tgarbage += 8\n\n\t// Timestamp, skip it.\n\toffset += 8\n\tgarbage += 8\n\n\t// LastAccess, skip it.\n\toffset += 8\n\tgarbage += 8\n\n\t// value len and its header.\n\tvlen := binary.BigEndian.Uint32(t.memory[offset : offset+4])\n\tgarbage += 4 + uint64(vlen)\n\n\t// Delete it from metadata\n\tdelete(t.hkeys, hkey)\n\n\tt.garbage += garbage\n\tt.inuse -= garbage\n\treturn nil\n}\n\n// UpdateTTL updates the TTL and timestamp fields of the entry identified by the\n// given hash key in-place, and refreshes its last access time. Returns\n// ErrHKeyNotFound if the hash key does not exist.\nfunc (t *Table) UpdateTTL(hkey uint64, value storage.Entry) error {\n\toffset, ok := t.hkeys[hkey]\n\tif !ok {\n\t\treturn ErrHKeyNotFound\n\t}\n\n\t// key, 1 byte for key size, klen for key's actual length.\n\tklen := uint64(t.memory[offset])\n\toffset += 1 + klen\n\n\t// Set the new TTL. It's 8 bytes.\n\tbinary.BigEndian.PutUint64(t.memory[offset:], uint64(value.TTL()))\n\toffset += 8\n\n\t// Set the new Timestamp. 
It's 8 bytes.\n\tbinary.BigEndian.PutUint64(t.memory[offset:], uint64(value.Timestamp()))\n\n\toffset += 8\n\n\t// Update the last access field\n\tbinary.BigEndian.PutUint64(t.memory[offset:], uint64(time.Now().UnixNano()))\n\n\treturn nil\n}\n\n// Check reports whether the given hash key exists in the table.\nfunc (t *Table) Check(hkey uint64) bool {\n\t_, ok := t.hkeys[hkey]\n\treturn ok\n}\n\n// Stats returns the current memory usage statistics for the table.\nfunc (t *Table) Stats() Stats {\n\treturn Stats{\n\t\tAllocated:  t.allocated,\n\t\tInuse:      t.inuse,\n\t\tGarbage:    t.garbage,\n\t\tLength:     len(t.hkeys),\n\t\tRecycledAt: t.recycledAt,\n\t}\n}\n\n// Range iterates over all entries in the table, calling f for each one.\n// If f returns false, iteration stops. The iteration order is non-deterministic.\nfunc (t *Table) Range(f func(hkey uint64, e storage.Entry) bool) {\n\tfor hkey := range t.hkeys {\n\t\te, err := t.Get(hkey)\n\t\tif errors.Is(err, ErrHKeyNotFound) {\n\t\t\tpanic(fmt.Errorf(\"hkey: %d found in index, but Get could not find it\", hkey))\n\t\t}\n\n\t\tif !f(hkey, e) {\n\t\t\tbreak\n\t\t}\n\t}\n}\n\n// RangeHKey iterates over all hash keys in the table without decoding entries.\n// If f returns false, iteration stops. The iteration order is non-deterministic.\nfunc (t *Table) RangeHKey(f func(hkey uint64) bool) {\n\tfor hkey := range t.hkeys {\n\t\tif !f(hkey) {\n\t\t\tbreak\n\t\t}\n\t}\n}\n\n// Reset clears all entries and metadata, resets memory usage counters, and\n// transitions the table to RecycledState. The underlying memory buffer is\n// retained for reuse.\nfunc (t *Table) Reset() {\n\tif len(t.hkeys) != 0 {\n\t\tt.hkeys = make(map[uint64]uint64)\n\t}\n\tt.offsetIndex = roaring64.New()\n\tt.SetState(RecycledState)\n\tt.inuse = 0\n\tt.garbage = 0\n\tt.offset = 0\n\tt.coefficient = 0\n\tt.recycledAt = time.Now().UnixNano()\n}\n\n// Scan performs a cursor-based iteration over the table entries. 
Starting from\n// the given cursor position, it calls f for up to count entries. It returns\n// the next cursor to resume scanning, or 0 when all entries have been visited.\n// If f returns false, iteration stops early.\nfunc (t *Table) Scan(cursor uint64, count int, f func(e storage.Entry) bool) (uint64, error) {\n\tit := t.offsetIndex.Iterator()\n\tif cursor != 0 {\n\t\tit.AdvanceIfNeeded(cursor)\n\t}\n\tvar num int\n\tfor it.HasNext() && num < count {\n\t\toffset := it.Next()\n\t\te := t.get(offset)\n\t\tif !f(e) {\n\t\t\tbreak\n\t\t}\n\t\tcursor = offset + 1\n\t\tnum++\n\t}\n\n\tif !it.HasNext() {\n\t\t// end of the scan\n\t\tcursor = 0\n\t}\n\n\treturn cursor, nil\n}\n\n// ScanRegexMatch performs a cursor-based iteration like Scan, but only yields\n// entries whose keys match the given regular expression. Returns the next cursor\n// to resume scanning, or 0 when all entries have been visited. Returns an error\n// if the regular expression is invalid.\nfunc (t *Table) ScanRegexMatch(cursor uint64, expr string, count int, f func(e storage.Entry) bool) (uint64, error) {\n\tr, err := regexp.Compile(expr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tit := t.offsetIndex.Iterator()\n\tif cursor != 0 {\n\t\tit.AdvanceIfNeeded(cursor)\n\t}\n\n\tvar num int\n\tfor it.HasNext() && num < count {\n\t\toffset := it.Next()\n\n\t\tkey, _ := t.getRawKey(offset)\n\t\tif !r.Match(key) {\n\t\t\tcontinue\n\t\t}\n\n\t\te := t.get(offset)\n\t\tif !f(e) {\n\t\t\tbreak\n\t\t}\n\t\tcursor = offset + 1\n\t\tnum++\n\t}\n\n\tif !it.HasNext() {\n\t\t// end of the scan\n\t\tcursor = 0\n\t}\n\treturn cursor, nil\n}\n"
  },
  {
    "path": "internal/ramblock/table/table_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage table\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/cespare/xxhash/v2\"\n\t\"github.com/olric-data/olric/internal/ramblock/entry\"\n\t\"github.com/olric-data/olric/pkg/storage\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nvar key = \"foobar\"\n\nconst hkey uint64 = 18071988\n\nfunc setupTable() (*Table, storage.Entry) {\n\ttb := New(1024)\n\te := entry.New()\n\te.SetKey(key)\n\te.SetValue([]byte(\"foobar-value\"))\n\treturn tb, e\n}\n\nfunc TestTable_Put(t *testing.T) {\n\ttb, e := setupTable()\n\terr := tb.Put(hkey, e)\n\trequire.NoError(t, err)\n}\n\nfunc TestTable_Get(t *testing.T) {\n\ttb, e := setupTable()\n\terr := tb.Put(hkey, e)\n\trequire.NoError(t, err)\n\n\tvalue, err := tb.Get(hkey)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, e.Key(), value.Key())\n\trequire.Equal(t, e.Value(), value.Value())\n\trequire.Equal(t, e.TTL(), value.TTL())\n\trequire.Equal(t, int64(0), e.LastAccess())\n\trequire.NotEqual(t, int64(0), value.LastAccess())\n}\n\nfunc TestTable_Delete(t *testing.T) {\n\ttb, e := setupTable()\n\n\terr := tb.Put(hkey, e)\n\trequire.NoError(t, err)\n\n\terr = tb.Delete(hkey)\n\trequire.NoError(t, err)\n\n\t_, err = tb.Get(hkey)\n\trequire.ErrorIs(t, ErrHKeyNotFound, err)\n}\n\nfunc TestTable_Check(t *testing.T) {\n\ttb, e := setupTable()\n\terr := 
tb.Put(hkey, e)\n\trequire.NoError(t, err)\n\n\trequire.True(t, tb.Check(hkey))\n\n\terr = tb.Delete(hkey)\n\trequire.NoError(t, err)\n\n\trequire.False(t, tb.Check(hkey))\n}\n\nfunc TestTable_PutRaw(t *testing.T) {\n\ttb, e := setupTable()\n\n\terr := tb.PutRaw(hkey, e.Encode())\n\trequire.NoError(t, err)\n\n\tvalue, err := tb.Get(hkey)\n\trequire.NoError(t, err)\n\trequire.Equal(t, e, value)\n}\n\nfunc TestTable_GetRaw(t *testing.T) {\n\ttb, e := setupTable()\n\n\terr := tb.Put(hkey, e)\n\trequire.NoError(t, err)\n\n\traw, err := tb.GetRaw(hkey)\n\trequire.NoError(t, err)\n\textracted := entry.New()\n\textracted.Decode(raw)\n\n\trequire.Equal(t, e.Key(), extracted.Key())\n\trequire.Equal(t, e.Value(), extracted.Value())\n\trequire.Equal(t, e.TTL(), extracted.TTL())\n\trequire.Equal(t, int64(0), e.LastAccess())\n\trequire.NotEqual(t, int64(0), extracted.LastAccess())\n}\n\nfunc TestTable_GetRawKey(t *testing.T) {\n\ttb, e := setupTable()\n\n\terr := tb.Put(hkey, e)\n\trequire.NoError(t, err)\n\n\trawKey, err := tb.GetRawKey(hkey)\n\trequire.NoError(t, err)\n\trequire.Equal(t, key, string(rawKey))\n}\n\nfunc TestTable_GetKey(t *testing.T) {\n\ttb, e := setupTable()\n\n\terr := tb.Put(hkey, e)\n\trequire.NoError(t, err)\n\n\tk, err := tb.GetKey(hkey)\n\trequire.NoError(t, err)\n\trequire.Equal(t, key, k)\n}\n\nfunc TestTable_SetState(t *testing.T) {\n\ttb, _ := setupTable()\n\ttb.SetState(ReadOnlyState)\n\trequire.Equal(t, ReadOnlyState, tb.State())\n}\n\nfunc TestTable_GetTTL(t *testing.T) {\n\ttb, e := setupTable()\n\tttl := time.Now().UnixNano()\n\te.SetTTL(ttl)\n\n\terr := tb.Put(hkey, e)\n\trequire.NoError(t, err)\n\n\tvalue, err := tb.GetTTL(hkey)\n\trequire.NoError(t, err)\n\trequire.Equal(t, ttl, value)\n}\n\nfunc TestTable_GetLastAccess(t *testing.T) {\n\ttb, e := setupTable()\n\n\terr := tb.Put(hkey, e)\n\trequire.NoError(t, err)\n\n\tvalue, err := tb.GetLastAccess(hkey)\n\trequire.NoError(t, err)\n\trequire.NotEqual(t, 0, value)\n}\n\nfunc 
TestTable_UpdateTTL(t *testing.T) {\n\ttb, e := setupTable()\n\tttl := time.Now().UnixNano()\n\te.SetTTL(ttl)\n\n\terr := tb.Put(hkey, e)\n\trequire.NoError(t, err)\n\n\te.SetTTL(ttl + 1000)\n\terr = tb.UpdateTTL(hkey, e)\n\trequire.NoError(t, err)\n\n\tvalue, err := tb.GetTTL(hkey)\n\trequire.NoError(t, err)\n\trequire.Equal(t, ttl+1000, value)\n}\n\nfunc TestTable_UpdateTTL_Update_LastAccess(t *testing.T) {\n\ttb, e := setupTable()\n\n\terr := tb.Put(hkey, e)\n\trequire.NoError(t, err)\n\n\tlastAccessOne, err := tb.GetLastAccess(hkey)\n\trequire.NoError(t, err)\n\n\t<-time.After(time.Millisecond)\n\n\tttl := time.Now().UnixNano() + 1000\n\te.SetTTL(ttl)\n\n\terr = tb.UpdateTTL(hkey, e)\n\trequire.NoError(t, err)\n\n\tlastAccessTwo, err := tb.GetLastAccess(hkey)\n\trequire.NoError(t, err)\n\n\trequire.Greater(t, lastAccessTwo, lastAccessOne)\n}\n\nfunc TestTable_State(t *testing.T) {\n\ttb, _ := setupTable()\n\trequire.Equal(t, ReadWriteState, tb.State())\n}\n\nfunc TestTable_Range(t *testing.T) {\n\tdata := make(map[uint64]storage.Entry)\n\n\ttb := New(1 << 20)\n\tfor i := 0; i < 100; i++ {\n\t\te := entry.New()\n\t\tikey := fmt.Sprintf(\"key-%d\", i)\n\t\tidata := []byte(fmt.Sprintf(\"value-%d\", i))\n\t\tihkey := xxhash.Sum64String(ikey)\n\t\te.SetKey(ikey)\n\t\te.SetValue(idata)\n\t\tdata[ihkey] = e\n\n\t\terr := tb.Put(ihkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\n\ttb.Range(func(hk uint64, e storage.Entry) bool {\n\t\titem, ok := data[hk]\n\t\trequire.True(t, ok)\n\n\t\trequire.Equal(t, item.Key(), e.Key())\n\t\trequire.Equal(t, item.Value(), e.Value())\n\t\trequire.Equal(t, item.TTL(), e.TTL())\n\t\trequire.Equal(t, int64(0), item.LastAccess())\n\t\trequire.NotEqual(t, int64(0), e.LastAccess())\n\n\t\treturn true\n\t})\n}\n\nfunc TestTable_Stats(t *testing.T) {\n\ttb := New(1 << 20)\n\tfor i := 0; i < 100; i++ {\n\t\te := entry.New()\n\t\tikey := fmt.Sprintf(\"key-%d\", i)\n\t\tidata := []byte(fmt.Sprintf(\"value-%d\", i))\n\t\tihkey := 
xxhash.Sum64String(ikey)\n\t\te.SetKey(ikey)\n\t\te.SetValue(idata)\n\t\terr := tb.Put(ihkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\n\ts := tb.Stats()\n\trequire.Equal(t, uint64(1<<20), s.Allocated)\n\trequire.Equal(t, 100, s.Length)\n\trequire.Equal(t, uint64(4280), s.Inuse)\n\trequire.Equal(t, uint64(0), s.Garbage)\n\n\tfor i := 0; i < 100; i++ {\n\t\tikey := fmt.Sprintf(\"key-%d\", i)\n\t\tihkey := xxhash.Sum64String(ikey)\n\t\terr := tb.Delete(ihkey)\n\t\trequire.NoError(t, err)\n\t}\n\n\ts = tb.Stats()\n\trequire.Equal(t, uint64(1<<20), s.Allocated)\n\trequire.Equal(t, 0, s.Length)\n\trequire.Equal(t, uint64(0), s.Inuse)\n\trequire.Equal(t, uint64(4280), s.Garbage)\n}\n\nfunc TestTable_Reset(t *testing.T) {\n\ttb := New(1 << 20)\n\tfor i := 0; i < 100; i++ {\n\t\te := entry.New()\n\t\tikey := fmt.Sprintf(\"key-%d\", i)\n\t\tidata := []byte(fmt.Sprintf(\"value-%d\", i))\n\t\tihkey := xxhash.Sum64String(ikey)\n\t\te.SetKey(ikey)\n\t\te.SetValue(idata)\n\t\terr := tb.Put(ihkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\n\ttb.Reset()\n\n\tstats := tb.Stats()\n\trequire.Equal(t, RecycledState, tb.State())\n\trequire.Equal(t, uint64(0), stats.Garbage)\n\trequire.Equal(t, uint64(0), stats.Inuse)\n\trequire.Equal(t, tb.allocated, stats.Allocated)\n\trequire.Equal(t, 0, stats.Length)\n\n\t// Verify Scan returns no entries after Reset\n\tvar count int\n\tcursor, err := tb.Scan(0, 100, func(e storage.Entry) bool {\n\t\tcount++\n\t\treturn true\n\t})\n\trequire.NoError(t, err)\n\trequire.Equal(t, uint64(0), cursor)\n\trequire.Equal(t, 0, count)\n}\n\nfunc TestTable_Reset_RecycleScenario(t *testing.T) {\n\ttb := New(1 << 20)\n\n\t// Phase 1: Put initial entries\n\tfor i := 0; i < 50; i++ {\n\t\te := entry.New()\n\t\tikey := fmt.Sprintf(\"old-key-%d\", i)\n\t\tidata := []byte(fmt.Sprintf(\"old-value-%d\", i))\n\t\tihkey := xxhash.Sum64String(ikey)\n\t\te.SetKey(ikey)\n\t\te.SetValue(idata)\n\t\terr := tb.Put(ihkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\n\t// Phase 2: Reset 
(simulates compaction recycling)\n\ttb.Reset()\n\ttb.SetState(ReadWriteState) // mirrors makeTable() behavior\n\n\t// Phase 3: Put new entries into the recycled table\n\tnewKeys := make(map[string]string)\n\tfor i := 0; i < 10; i++ {\n\t\te := entry.New()\n\t\tikey := fmt.Sprintf(\"new-key-%d\", i)\n\t\tidata := fmt.Sprintf(\"new-value-%d\", i)\n\t\tihkey := xxhash.Sum64String(ikey)\n\t\te.SetKey(ikey)\n\t\te.SetValue([]byte(idata))\n\t\terr := tb.Put(ihkey, e)\n\t\trequire.NoError(t, err)\n\t\tnewKeys[ikey] = idata\n\t}\n\n\t// Phase 4: Scan and verify ONLY new entries are returned\n\tscannedKeys := make(map[string]string)\n\tvar cursor uint64\n\tvar err error\n\tfor {\n\t\tcursor, err = tb.Scan(cursor, 10, func(e storage.Entry) bool {\n\t\t\tscannedKeys[e.Key()] = string(e.Value())\n\t\t\treturn true\n\t\t})\n\t\trequire.NoError(t, err)\n\t\tif cursor == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\trequire.Equal(t, len(newKeys), len(scannedKeys))\n\tfor k, v := range newKeys {\n\t\tsv, ok := scannedKeys[k]\n\t\trequire.True(t, ok, \"expected key %s not found in scan results\", k)\n\t\trequire.Equal(t, v, sv)\n\t}\n}\n\nfunc TestTable_Scan(t *testing.T) {\n\n\ttb := New(1 << 20)\n\tfor i := 0; i < 100; i++ {\n\t\te := entry.New()\n\t\tkey := fmt.Sprintf(\"key-%d\", i)\n\t\tdata := []byte(fmt.Sprintf(\"value-%d\", i))\n\t\thkey := xxhash.Sum64String(key)\n\t\te.SetKey(key)\n\t\te.SetValue(data)\n\n\t\terr := tb.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\n\tvar err error\n\tvar cursor uint64\n\tfor {\n\t\tcursor, err = tb.Scan(cursor, 10, func(e storage.Entry) bool {\n\t\t\treturn true\n\t\t})\n\t\trequire.NoError(t, err)\n\t\tif cursor == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc TestTable_ScanRegexMatch(t *testing.T) {\n\ttb := New(1 << 20)\n\tfor i := 0; i < 100; i++ {\n\t\tif i%2 == 0 {\n\t\t\tkey = \"even:\" + strconv.Itoa(i)\n\t\t} else {\n\t\t\tkey = \"odd:\" + strconv.Itoa(i)\n\t\t}\n\t\te := entry.New()\n\t\tdata := []byte(fmt.Sprintf(\"value-%d\", i))\n\t\thkey 
:= xxhash.Sum64String(key)\n\t\te.SetKey(key)\n\t\te.SetValue(data)\n\n\t\terr := tb.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\n\tvar err error\n\tvar num int\n\tvar count int\n\tvar cursor uint64\n\tfor {\n\t\tnum++\n\t\tcursor, err = tb.ScanRegexMatch(cursor, \"even:\", 10, func(e storage.Entry) bool {\n\t\t\tcount++\n\t\t\treturn true\n\t\t})\n\t\trequire.NoError(t, err)\n\t\tif cursor == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\trequire.Equal(t, 6, num)\n\trequire.Equal(t, 50, count)\n}\n\nfunc TestTable_Put_ErrKeyTooLarge(t *testing.T) {\n\ttb := New(1 << 20)\n\te := entry.New()\n\tlongKey := strings.Repeat(\"k\", MaxKeyLength)\n\te.SetKey(longKey)\n\te.SetValue([]byte(\"value\"))\n\n\terr := tb.Put(hkey, e)\n\trequire.ErrorIs(t, err, storage.ErrKeyTooLarge)\n}\n\nfunc TestTable_Put_OverwriteExistingKey(t *testing.T) {\n\ttb := New(1024)\n\te1 := entry.New()\n\te1.SetKey(\"mykey\")\n\te1.SetValue([]byte(\"old-value\"))\n\n\terr := tb.Put(hkey, e1)\n\trequire.NoError(t, err)\n\n\tstatsBefore := tb.Stats()\n\n\te2 := entry.New()\n\te2.SetKey(\"mykey\")\n\te2.SetValue([]byte(\"new-value\"))\n\n\terr = tb.Put(hkey, e2)\n\trequire.NoError(t, err)\n\n\tgot, err := tb.Get(hkey)\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"new-value\", string(got.Value()))\n\trequire.Equal(t, \"mykey\", got.Key())\n\n\tstatsAfter := tb.Stats()\n\trequire.Equal(t, 1, statsAfter.Length)\n\trequire.Greater(t, statsAfter.Garbage, statsBefore.Garbage)\n}\n\nfunc TestTable_PutRaw_ErrNotEnoughSpace(t *testing.T) {\n\ttb := New(16)\n\tdata := make([]byte, 32)\n\terr := tb.PutRaw(hkey, data)\n\trequire.ErrorIs(t, err, ErrNotEnoughSpace)\n}\n\nfunc TestTable_Put_ErrNotEnoughSpace(t *testing.T) {\n\ttb := New(16)\n\te := entry.New()\n\te.SetKey(\"mykey\")\n\te.SetValue([]byte(\"some-value-that-is-too-large\"))\n\n\terr := tb.Put(hkey, e)\n\trequire.ErrorIs(t, err, ErrNotEnoughSpace)\n}\n\nfunc TestTable_GetRaw_ErrHKeyNotFound(t *testing.T) {\n\ttb := New(1024)\n\t_, err := 
tb.GetRaw(hkey)\n\trequire.ErrorIs(t, err, ErrHKeyNotFound)\n}\n\nfunc TestTable_Delete_ErrHKeyNotFound(t *testing.T) {\n\ttb := New(1024)\n\terr := tb.Delete(hkey)\n\trequire.ErrorIs(t, err, ErrHKeyNotFound)\n}\n\nfunc TestTable_UpdateTTL_ErrHKeyNotFound(t *testing.T) {\n\ttb := New(1024)\n\te := entry.New()\n\te.SetTTL(time.Now().UnixNano())\n\terr := tb.UpdateTTL(hkey, e)\n\trequire.ErrorIs(t, err, ErrHKeyNotFound)\n}\n\nfunc TestTable_RangeHKey(t *testing.T) {\n\ttb := New(1 << 20)\n\texpected := make(map[uint64]struct{})\n\tfor i := 0; i < 50; i++ {\n\t\te := entry.New()\n\t\tikey := fmt.Sprintf(\"key-%d\", i)\n\t\tihkey := xxhash.Sum64String(ikey)\n\t\te.SetKey(ikey)\n\t\te.SetValue([]byte(fmt.Sprintf(\"value-%d\", i)))\n\t\terr := tb.Put(ihkey, e)\n\t\trequire.NoError(t, err)\n\t\texpected[ihkey] = struct{}{}\n\t}\n\n\tcollected := make(map[uint64]struct{})\n\ttb.RangeHKey(func(hk uint64) bool {\n\t\tcollected[hk] = struct{}{}\n\t\treturn true\n\t})\n\trequire.Equal(t, expected, collected)\n\n\t// Test early stop: callback returns false after first call\n\tvar count int\n\ttb.RangeHKey(func(hk uint64) bool {\n\t\tcount++\n\t\treturn false\n\t})\n\trequire.Equal(t, 1, count)\n}\n\nfunc TestTable_ScanRegexMatch_InvalidRegex(t *testing.T) {\n\ttb := New(1024)\n\te := entry.New()\n\te.SetKey(\"test\")\n\te.SetValue([]byte(\"value\"))\n\terr := tb.Put(hkey, e)\n\trequire.NoError(t, err)\n\n\t_, err = tb.ScanRegexMatch(0, \"[invalid\", 10, func(e storage.Entry) bool {\n\t\treturn true\n\t})\n\trequire.Error(t, err)\n}\n\nfunc TestTable_Coefficient(t *testing.T) {\n\ttb := New(1024)\n\trequire.Equal(t, uint64(0), tb.Coefficient())\n\n\ttb.SetCoefficient(42)\n\trequire.Equal(t, uint64(42), tb.Coefficient())\n\n\ttb.SetCoefficient(0)\n\trequire.Equal(t, uint64(0), tb.Coefficient())\n}\n\nfunc TestTable_ScanRegexMatch_SingleMatch(t *testing.T) {\n\ttb := New(1 << 20)\n\tfor i := 0; i < 100; i++ {\n\t\te := entry.New()\n\t\tkey := fmt.Sprintf(\"key-%d\", i)\n\t\tdata 
:= []byte(fmt.Sprintf(\"value-%d\", i))\n\t\thkey := xxhash.Sum64String(key)\n\t\te.SetKey(key)\n\t\te.SetValue(data)\n\n\t\terr := tb.Put(hkey, e)\n\t\trequire.NoError(t, err)\n\t}\n\n\te := entry.New()\n\te.SetKey(\"even:200\")\n\te.SetTTL(123123)\n\te.SetValue([]byte(\"my-value\"))\n\te.SetTimestamp(time.Now().UnixNano())\n\thkey := xxhash.Sum64([]byte(e.Key()))\n\terr := tb.Put(hkey, e)\n\trequire.NoError(t, err)\n\n\tvar num int\n\tvar count int\n\tvar cursor uint64\n\tfor {\n\t\tnum++\n\t\tcursor, err = tb.ScanRegexMatch(cursor, \"even:\", 10, func(e storage.Entry) bool {\n\t\t\tcount++\n\t\t\trequire.Equal(t, \"even:200\", e.Key())\n\t\t\trequire.Equal(t, \"my-value\", string(e.Value()))\n\t\t\treturn true\n\t\t})\n\t\trequire.NoError(t, err)\n\t\tif cursor == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\trequire.Equal(t, 1, num)\n\trequire.Equal(t, 1, count)\n}\n"
  },
  {
    "path": "internal/ramblock/transport.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage ramblock\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com/olric-data/olric/internal/ramblock/table\"\n\t\"github.com/olric-data/olric/pkg/storage\"\n)\n\ntype transferIterator struct {\n\tstorage *RamBlock\n}\n\nfunc (t *transferIterator) Next() bool {\n\treturn len(t.storage.tables) != 0\n}\n\nfunc (t *transferIterator) Drop(index int) error {\n\tif len(t.storage.tables) == 0 {\n\t\treturn fmt.Errorf(\"there is no table to drop\")\n\t}\n\n\ttb := t.storage.tables[index]\n\tt.storage.tables = append(t.storage.tables[:index], t.storage.tables[index+1:]...)\n\tdelete(t.storage.tablesByCoefficient, tb.Coefficient())\n\n\treturn nil\n}\n\nfunc (t *transferIterator) Export() ([]byte, int, error) {\n\tfor index, t := range t.storage.tables {\n\t\tif t.State() == table.RecycledState {\n\t\t\tcontinue\n\t\t}\n\n\t\tdata, err := table.Encode(t)\n\t\tif err != nil {\n\t\t\treturn nil, 0, err\n\t\t}\n\t\treturn data, index, nil\n\t}\n\treturn nil, 0, io.EOF\n}\n\nfunc (rb *RamBlock) Import(data []byte, f func(uint64, storage.Entry) error) error {\n\ttb, err := table.Decode(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttb.Range(func(hkey uint64, e storage.Entry) bool {\n\t\treturn f(hkey, e) == nil\n\t})\n\treturn err\n}\n\nfunc (rb *RamBlock) TransferIterator() storage.TransferIterator {\n\treturn &transferIterator{\n\t\tstorage: 
rb,\n\t}\n}\n"
  },
  {
    "path": "internal/resp/encoder.go",
    "content": "// Copyright (c) 2013 The github.com/go-redis/redis Authors.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n\n// * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n// * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage resp\n\nimport (\n\t\"encoding\"\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/util\"\n)\n\ntype encoder interface {\n\tio.Writer\n\tio.ByteWriter\n\tWriteString(s string) (n int, err error)\n}\n\ntype Encoder struct {\n\tencoder\n\n\tlenBuf []byte\n\tnumBuf []byte\n}\n\nfunc New(e encoder) *Encoder {\n\treturn &Encoder{\n\t\tencoder: e,\n\n\t\tlenBuf: make([]byte, 64),\n\t\tnumBuf: make([]byte, 64),\n\t}\n}\n\nfunc (e *Encoder) Encode(v interface{}) error {\n\tswitch v := v.(type) {\n\tcase nil:\n\t\treturn 
e.string(\"\")\n\tcase string:\n\t\treturn e.string(v)\n\tcase []byte:\n\t\treturn e.bytes(v)\n\tcase int:\n\t\treturn e.int(int64(v))\n\tcase int8:\n\t\treturn e.int(int64(v))\n\tcase int16:\n\t\treturn e.int(int64(v))\n\tcase int32:\n\t\treturn e.int(int64(v))\n\tcase int64:\n\t\treturn e.int(v)\n\tcase uint:\n\t\treturn e.uint(uint64(v))\n\tcase uint8:\n\t\treturn e.uint(uint64(v))\n\tcase uint16:\n\t\treturn e.uint(uint64(v))\n\tcase uint32:\n\t\treturn e.uint(uint64(v))\n\tcase uint64:\n\t\treturn e.uint(v)\n\tcase float32:\n\t\treturn e.float(float64(v))\n\tcase float64:\n\t\treturn e.float(v)\n\tcase bool:\n\t\tif v {\n\t\t\treturn e.int(1)\n\t\t}\n\t\treturn e.int(0)\n\tcase time.Time:\n\t\te.numBuf = v.AppendFormat(e.numBuf[:0], time.RFC3339Nano)\n\t\treturn e.bytes(e.numBuf)\n\tcase time.Duration:\n\t\treturn e.int(v.Nanoseconds())\n\tcase encoding.BinaryMarshaler:\n\t\tb, err := v.MarshalBinary()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn e.bytes(b)\n\tdefault:\n\t\treturn fmt.Errorf(\n\t\t\t\"olric: can't marshal %T (implement encoding.BinaryMarshaler)\", v)\n\t}\n}\n\nfunc (e *Encoder) bytes(b []byte) error {\n\tif _, err := e.Write(b); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (e *Encoder) string(s string) error {\n\treturn e.bytes(util.StringToBytes(s))\n}\n\nfunc (e *Encoder) uint(n uint64) error {\n\te.numBuf = strconv.AppendUint(e.numBuf[:0], n, 10)\n\treturn e.bytes(e.numBuf)\n}\n\nfunc (e *Encoder) int(n int64) error {\n\te.numBuf = strconv.AppendInt(e.numBuf[:0], n, 10)\n\treturn e.bytes(e.numBuf)\n}\n\nfunc (e *Encoder) float(f float64) error {\n\te.numBuf = strconv.AppendFloat(e.numBuf[:0], f, 'f', -1, 64)\n\treturn e.bytes(e.numBuf)\n}\n"
  },
  {
    "path": "internal/resp/encoder_test.go",
    "content": "package resp\n\nimport (\n\t\"bytes\"\n\t\"encoding\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\ntype MyType struct{}\n\nvar _ encoding.BinaryMarshaler = (*MyType)(nil)\n\nfunc (t *MyType) MarshalBinary() ([]byte, error) {\n\treturn []byte(\"hello\"), nil\n}\n\nfunc (t *MyType) UnmarshalBinary(data []byte) error {\n\tif !bytes.Equal([]byte(\"hello\"), data) {\n\t\treturn fmt.Errorf(\"not equal\")\n\t}\n\treturn nil\n}\n\nfunc TestWriter_WriteArg(t *testing.T) {\n\tbuf := bytes.NewBuffer(nil)\n\tw := New(buf)\n\n\tt.Run(\"uint64\", func(t *testing.T) {\n\t\tdefer buf.Reset()\n\t\tvalue := uint64(345353)\n\t\terr := w.Encode(value)\n\t\trequire.NoError(t, err)\n\n\t\tscannedValue := new(uint64)\n\t\terr = Scan(buf.Bytes(), scannedValue)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, uint64(345353), *scannedValue)\n\t})\n\n\tt.Run(\"nil\", func(t *testing.T) {\n\t\tdefer buf.Reset()\n\n\t\terr := w.Encode(nil)\n\t\trequire.NoError(t, err)\n\n\t\tscannedValue := new(string)\n\t\terr = Scan(buf.Bytes(), scannedValue)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, \"\", *scannedValue)\n\t})\n\n\tt.Run(\"string\", func(t *testing.T) {\n\t\tdefer buf.Reset()\n\n\t\terr := w.Encode(\"foobar\")\n\t\trequire.NoError(t, err)\n\n\t\tscannedValue := new(string)\n\t\terr = Scan(buf.Bytes(), scannedValue)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, \"foobar\", *scannedValue)\n\t})\n\n\tt.Run(\"byte slice\", func(t *testing.T) {\n\t\tdefer buf.Reset()\n\n\t\terr := w.Encode([]byte(\"foobar\"))\n\t\trequire.NoError(t, err)\n\n\t\tscannedValue := new([]byte)\n\t\terr = Scan(buf.Bytes(), scannedValue)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, []byte(\"foobar\"), *scannedValue)\n\t})\n\n\tt.Run(\"int\", func(t *testing.T) {\n\t\tdefer buf.Reset()\n\n\t\tvalue := 345353\n\t\terr := w.Encode(value)\n\t\trequire.NoError(t, err)\n\n\t\tscannedValue := new(int)\n\t\terr = Scan(buf.Bytes(), 
scannedValue)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, 345353, *scannedValue)\n\t})\n\n\tt.Run(\"int8\", func(t *testing.T) {\n\t\tdefer buf.Reset()\n\n\t\tvalue := int8(2)\n\t\terr := w.Encode(value)\n\t\trequire.NoError(t, err)\n\n\t\tscannedValue := new(int8)\n\t\terr = Scan(buf.Bytes(), scannedValue)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, int8(2), *scannedValue)\n\t})\n\n\tt.Run(\"int16\", func(t *testing.T) {\n\t\tdefer buf.Reset()\n\n\t\tvalue := int16(2)\n\t\terr := w.Encode(value)\n\t\trequire.NoError(t, err)\n\n\t\tscannedValue := new(int16)\n\t\terr = Scan(buf.Bytes(), scannedValue)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, int16(2), *scannedValue)\n\t})\n\n\tt.Run(\"int32\", func(t *testing.T) {\n\t\tdefer buf.Reset()\n\n\t\tvalue := int32(2)\n\t\terr := w.Encode(value)\n\t\trequire.NoError(t, err)\n\n\t\tscannedValue := new(int32)\n\t\terr = Scan(buf.Bytes(), scannedValue)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, int32(2), *scannedValue)\n\t})\n\n\tt.Run(\"int64\", func(t *testing.T) {\n\t\tdefer buf.Reset()\n\n\t\tvalue := int64(2)\n\t\terr := w.Encode(value)\n\t\trequire.NoError(t, err)\n\n\t\tscannedValue := new(int64)\n\t\terr = Scan(buf.Bytes(), scannedValue)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, int64(2), *scannedValue)\n\t})\n\n\tt.Run(\"uint\", func(t *testing.T) {\n\t\tdefer buf.Reset()\n\n\t\tvalue := uint(2)\n\t\terr := w.Encode(value)\n\t\trequire.NoError(t, err)\n\n\t\tscannedValue := new(uint)\n\t\terr = Scan(buf.Bytes(), scannedValue)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, uint(2), *scannedValue)\n\t})\n\n\tt.Run(\"uint8\", func(t *testing.T) {\n\t\tdefer buf.Reset()\n\n\t\tvalue := uint8(2)\n\t\terr := w.Encode(value)\n\t\trequire.NoError(t, err)\n\n\t\tscannedValue := new(uint8)\n\t\terr = Scan(buf.Bytes(), scannedValue)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, uint8(2), *scannedValue)\n\t})\n\n\tt.Run(\"uint16\", func(t *testing.T) {\n\t\tdefer 
buf.Reset()\n\n\t\tvalue := uint16(2)\n\t\terr := w.Encode(value)\n\t\trequire.NoError(t, err)\n\n\t\tscannedValue := new(uint16)\n\t\terr = Scan(buf.Bytes(), scannedValue)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, uint16(2), *scannedValue)\n\t})\n\n\tt.Run(\"uint32\", func(t *testing.T) {\n\t\tdefer buf.Reset()\n\n\t\tvalue := uint32(2)\n\t\terr := w.Encode(value)\n\t\trequire.NoError(t, err)\n\n\t\tscannedValue := new(uint32)\n\t\terr = Scan(buf.Bytes(), scannedValue)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, uint32(2), *scannedValue)\n\t})\n\n\tt.Run(\"uint64\", func(t *testing.T) {\n\t\tdefer buf.Reset()\n\n\t\tvalue := uint64(2)\n\t\terr := w.Encode(value)\n\t\trequire.NoError(t, err)\n\n\t\tscannedValue := new(uint64)\n\t\terr = Scan(buf.Bytes(), scannedValue)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, uint64(2), *scannedValue)\n\t})\n\n\tt.Run(\"float32\", func(t *testing.T) {\n\t\tdefer buf.Reset()\n\n\t\tvalue := float32(2)\n\t\terr := w.Encode(value)\n\t\trequire.NoError(t, err)\n\n\t\tscannedValue := new(float32)\n\t\terr = Scan(buf.Bytes(), scannedValue)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, float32(2), *scannedValue)\n\t})\n\n\tt.Run(\"float64\", func(t *testing.T) {\n\t\tdefer buf.Reset()\n\n\t\tvalue := float64(2)\n\t\terr := w.Encode(value)\n\t\trequire.NoError(t, err)\n\n\t\tscannedValue := new(float64)\n\t\terr = Scan(buf.Bytes(), scannedValue)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, float64(2), *scannedValue)\n\t})\n\n\tt.Run(\"bool\", func(t *testing.T) {\n\t\tdefer buf.Reset()\n\n\t\tvalue := true\n\t\terr := w.Encode(value)\n\t\trequire.NoError(t, err)\n\n\t\tscannedValue := new(bool)\n\t\terr = Scan(buf.Bytes(), scannedValue)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, true, *scannedValue)\n\t})\n\n\tt.Run(\"time.Time\", func(t *testing.T) {\n\t\tdefer buf.Reset()\n\n\t\tvalue := time.Now()\n\t\terr := w.Encode(value)\n\t\trequire.NoError(t, err)\n\n\t\tscannedValue := 
new(time.Time)\n\t\terr = Scan(buf.Bytes(), scannedValue)\n\t\trequire.NoError(t, err)\n\t})\n\n\tt.Run(\"time.Duration\", func(t *testing.T) {\n\t\tdefer buf.Reset()\n\n\t\tvalue := time.Second\n\t\terr := w.Encode(value)\n\t\trequire.NoError(t, err)\n\n\t\tscannedValue := new(time.Duration)\n\t\terr = Scan(buf.Bytes(), scannedValue)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, time.Second, *scannedValue)\n\t})\n\n\tt.Run(\"encoding.BinaryMarshaler\", func(t *testing.T) {\n\t\tdefer buf.Reset()\n\n\t\tvar value encoding.BinaryMarshaler = &MyType{}\n\t\terr := w.Encode(value)\n\t\trequire.NoError(t, err)\n\n\t\tscannedValue := new(MyType)\n\t\terr = Scan(buf.Bytes(), scannedValue)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, MyType{}, *scannedValue)\n\t})\n}\n"
  },
  {
    "path": "internal/resp/scan.go",
    "content": "// Copyright (c) 2013 The github.com/go-redis/redis Authors.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n\n// * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n// * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage resp\n\nimport (\n\t\"encoding\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/util\"\n)\n\n// Scan parses bytes `b` to `v` with appropriate type.\n//\n//nolint:gocyclo\nfunc Scan(b []byte, v interface{}) error {\n\tswitch v := v.(type) {\n\tcase nil:\n\t\treturn fmt.Errorf(\"olric: Scan(nil)\")\n\tcase *string:\n\t\t*v = util.BytesToString(b)\n\t\treturn nil\n\tcase *[]byte:\n\t\t*v = b\n\t\treturn nil\n\tcase *int:\n\t\tvar err error\n\t\t*v, err = util.Atoi(b)\n\t\treturn err\n\tcase *int8:\n\t\tn, err := util.ParseInt(b, 10, 8)\n\t\tif 
err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*v = int8(n)\n\t\treturn nil\n\tcase *int16:\n\t\tn, err := util.ParseInt(b, 10, 16)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*v = int16(n)\n\t\treturn nil\n\tcase *int32:\n\t\tn, err := util.ParseInt(b, 10, 32)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*v = int32(n)\n\t\treturn nil\n\tcase *int64:\n\t\tn, err := util.ParseInt(b, 10, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*v = n\n\t\treturn nil\n\tcase *uint:\n\t\tn, err := util.ParseUint(b, 10, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*v = uint(n)\n\t\treturn nil\n\tcase *uint8:\n\t\tn, err := util.ParseUint(b, 10, 8)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*v = uint8(n)\n\t\treturn nil\n\tcase *uint16:\n\t\tn, err := util.ParseUint(b, 10, 16)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*v = uint16(n)\n\t\treturn nil\n\tcase *uint32:\n\t\tn, err := util.ParseUint(b, 10, 32)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*v = uint32(n)\n\t\treturn nil\n\tcase *uint64:\n\t\tn, err := util.ParseUint(b, 10, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*v = n\n\t\treturn nil\n\tcase *float32:\n\t\tn, err := util.ParseFloat(b, 32)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*v = float32(n)\n\t\treturn err\n\tcase *float64:\n\t\tvar err error\n\t\t*v, err = util.ParseFloat(b, 64)\n\t\treturn err\n\tcase *bool:\n\t\t*v = len(b) == 1 && b[0] == '1'\n\t\treturn nil\n\tcase *time.Time:\n\t\tvar err error\n\t\t*v, err = time.Parse(time.RFC3339Nano, util.BytesToString(b))\n\t\treturn err\n\tcase *time.Duration:\n\t\tn, err := util.ParseInt(b, 10, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*v = time.Duration(n)\n\t\treturn nil\n\tcase encoding.BinaryUnmarshaler:\n\t\treturn v.UnmarshalBinary(b)\n\tdefault:\n\t\treturn fmt.Errorf(\n\t\t\t\"olric: can't unmarshal %T (consider implementing BinaryUnmarshaler)\", v)\n\t}\n}\n"
  },
  {
    "path": "internal/roundrobin/round_robin.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage roundrobin\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n)\n\n// ErrEmptyInstance denotes that there is nothing in the round-robin instance to schedule.\nvar ErrEmptyInstance = errors.New(\"empty round-robin instance\")\n\n// RoundRobin implements quite simple round-robin scheduling algorithm to distribute load fairly between servers.\ntype RoundRobin struct {\n\t// Mutual exclusion lock is required here because the Get method\n\t// is called concurrently by the client component, and it modifies the state\n\t// in every call.\n\tmtx     sync.RWMutex\n\tcurrent int\n\titems   []string\n}\n\n// New returns a new RoundRobin instance.\nfunc New(items []string) *RoundRobin {\n\treturn &RoundRobin{\n\t\tcurrent: 0,\n\t\titems:   items,\n\t}\n}\n\n// Get returns an item.\nfunc (r *RoundRobin) Get() (string, error) {\n\t// Acquire the lock here. 
This function modifies the internal state.\n\tr.mtx.Lock()\n\tdefer r.mtx.Unlock()\n\n\tif len(r.items) == 0 {\n\t\treturn \"\", ErrEmptyInstance\n\t}\n\n\tif r.current >= len(r.items) {\n\t\tr.current %= len(r.items)\n\t}\n\n\tif r.current >= len(r.items) {\n\t\treturn \"\", fmt.Errorf(\"round-robin: corrupted internal state\")\n\t}\n\n\titem := r.items[r.current]\n\tr.current++\n\treturn item, nil\n}\n\n// Add adds a new item to the Round-Robin scheduler.\nfunc (r *RoundRobin) Add(item string) {\n\tr.mtx.Lock()\n\tdefer r.mtx.Unlock()\n\n\tr.items = append(r.items, item)\n}\n\n// Delete deletes an item from the Round-Robin scheduler.\nfunc (r *RoundRobin) Delete(item string) {\n\tr.mtx.Lock()\n\tdefer r.mtx.Unlock()\n\n\tfor i := 0; i < len(r.items); i++ {\n\t\tif r.items[i] == item {\n\t\t\tr.items = append(r.items[:i], r.items[i+1:]...)\n\t\t\ti--\n\t\t}\n\t}\n}\n\n// Length returns the count of items\nfunc (r *RoundRobin) Length() int {\n\tr.mtx.RLock()\n\tdefer r.mtx.RUnlock()\n\n\treturn len(r.items)\n}\n"
  },
  {
    "path": "internal/roundrobin/round_robin_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage roundrobin\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestRoundRobin(t *testing.T) {\n\titems := []string{\"127.0.0.1:2323\", \"127.0.0.1:4556\", \"127.0.0.1:7889\"}\n\tr := New(items)\n\n\tt.Run(\"Get\", func(t *testing.T) {\n\t\titems := make(map[string]int)\n\t\tfor i := 0; i < r.Length(); i++ {\n\t\t\titem, err := r.Get()\n\t\t\trequire.NoError(t, err)\n\t\t\titems[item]++\n\t\t}\n\t\tif len(items) != r.Length() {\n\t\t\tt.Fatalf(\"Expected item count: %d. Got: %d\", r.Length(), len(items))\n\t\t}\n\t})\n\n\tt.Run(\"Add\", func(t *testing.T) {\n\t\titem := \"127.0.0.1:3320\"\n\t\tr.Add(item)\n\t\titems := make(map[string]int)\n\t\tfor i := 0; i < r.Length(); i++ {\n\t\t\titem, err := r.Get()\n\t\t\trequire.NoError(t, err)\n\t\t\titems[item]++\n\t\t}\n\t\tif _, ok := items[item]; !ok {\n\t\t\tt.Fatalf(\"Item not processed: %s\", item)\n\t\t}\n\t\tif len(items) != r.Length() {\n\t\t\tt.Fatalf(\"Expected item count: %d. 
Got: %d\", r.Length(), len(items))\n\t\t}\n\t})\n\n\tt.Run(\"Delete\", func(t *testing.T) {\n\t\titem := \"127.0.0.1:7889\"\n\t\tr.Delete(item)\n\n\t\titems := make(map[string]int)\n\t\tfor i := 0; i < r.Length(); i++ {\n\t\t\titem, err := r.Get()\n\t\t\trequire.NoError(t, err)\n\t\t\titems[item]++\n\t\t}\n\t\tif _, ok := items[item]; ok {\n\t\t\tt.Fatalf(\"Item still exists: %s\", item)\n\t\t}\n\t\tif len(items) != r.Length() {\n\t\t\tt.Fatalf(\"Expected item count: %d. Got: %d\", r.Length(), len(items))\n\t\t}\n\t})\n}\n\nfunc TestRoundRobin_Delete_NonExistent(t *testing.T) {\n\titems := []string{\"127.0.0.1:2323\", \"127.0.0.1:4556\", \"127.0.0.1:7889\"}\n\tr := New(items)\n\n\tvar fresh []string\n\tfresh = append(fresh, items...)\n\tfor i, item := range fresh {\n\t\tif i+1 == len(items) {\n\t\t\tr.Delete(item)\n\t\t} else {\n\t\t\tr.Delete(item)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "internal/server/client.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage server\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n\n\t\"github.com/olric-data/olric/config\"\n\t\"github.com/olric-data/olric/internal/roundrobin\"\n\t\"github.com/redis/go-redis/v9\"\n)\n\ntype Client struct {\n\tmu sync.RWMutex\n\n\tconfig     *config.Client\n\tclients    map[string]*redis.Client\n\troundRobin *roundrobin.RoundRobin\n}\n\nfunc NewClient(c *config.Client) *Client {\n\tif c == nil {\n\t\tc = config.NewClient()\n\t\terr := c.Sanitize()\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"failed to sanitize client config: %s\", err))\n\t\t}\n\t}\n\treturn &Client{\n\t\tconfig:     c,\n\t\tclients:    make(map[string]*redis.Client),\n\t\troundRobin: roundrobin.New(nil),\n\t}\n}\n\nfunc (c *Client) Addresses() map[string]struct{} {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\taddresses := make(map[string]struct{})\n\tfor address := range c.clients {\n\t\taddresses[address] = struct{}{}\n\t}\n\treturn addresses\n}\n\nfunc (c *Client) Get(addr string) *redis.Client {\n\tc.mu.RLock()\n\trc, ok := c.clients[addr]\n\tif ok {\n\t\tc.mu.RUnlock()\n\t\treturn rc\n\t}\n\tc.mu.RUnlock()\n\n\t// Need the lock for writing, we modify c.clients map and the round-robin\n\t// implementation updates its internal state.\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\t// Need to check again, because another goroutine may have 
updated clients\n\t// between our calls to RUnlock and Lock.\n\tif rc, ok = c.clients[addr]; ok {\n\t\treturn rc\n\t}\n\n\topt := c.config.RedisOptions()\n\topt.Protocol = 2\n\topt.Addr = addr\n\trc = redis.NewClient(opt)\n\tc.clients[addr] = rc\n\tc.roundRobin.Add(addr)\n\treturn rc\n}\n\nfunc (c *Client) pickNodeRoundRobin() (string, error) {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\taddr, err := c.roundRobin.Get()\n\tif errors.Is(err, roundrobin.ErrEmptyInstance) {\n\t\treturn \"\", fmt.Errorf(\"no available client found\")\n\t}\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn addr, nil\n}\n\nfunc (c *Client) Pick() (*redis.Client, error) {\n\taddr, err := c.pickNodeRoundRobin()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c.Get(addr), nil\n}\n\nfunc (c *Client) Close(addr string) error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\trc, ok := c.clients[addr]\n\tif ok {\n\t\terr := rc.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.roundRobin.Delete(addr)\n\t\tdelete(c.clients, addr)\n\t}\n\n\treturn nil\n}\n\nfunc (c *Client) Shutdown(ctx context.Context) error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tfor addr, rc := range c.clients {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tdefault:\n\t\t}\n\n\t\tif err := rc.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdelete(c.clients, addr)\n\t\tc.roundRobin.Delete(addr)\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "internal/server/client_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage server\n\nimport (\n\t\"context\"\n\t\"net\"\n\t\"strconv\"\n\t\"testing\"\n\n\t\"github.com/olric-data/olric/config\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/tidwall/redcon\"\n)\n\nfunc TestServer_Client_Get(t *testing.T) {\n\tsrv := newServer(t)\n\tsrv.ServeMux().HandleFunc(protocol.Generic.Ping, func(conn redcon.Conn, cmd redcon.Command) {\n\t\tconn.WriteBulkString(\"pong\")\n\t})\n\n\t<-srv.StartedCtx.Done()\n\n\taddr := net.JoinHostPort(srv.config.BindAddr, strconv.Itoa(srv.config.BindPort))\n\tc := config.NewClient()\n\trequire.NoError(t, c.Sanitize())\n\n\tcs := NewClient(c)\n\trc := cs.Get(addr)\n\n\tctx := context.Background()\n\tcmd := protocol.NewPing().Command(ctx)\n\terr := rc.Process(ctx, cmd)\n\trequire.NoError(t, err)\n\n\tresult, err := cmd.Result()\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"pong\", result)\n\n\tt.Run(\"Fetch cached client\", func(t *testing.T) {\n\t\tnewClient := cs.Get(addr)\n\t\trequire.Equal(t, rc, newClient)\n\t})\n}\n\nfunc TestServer_Client_Pick(t *testing.T) {\n\tservers := make(map[string]*Server)\n\tfor i := 0; i < 10; i++ {\n\t\tsrv := newServer(t)\n\t\tsrv.ServeMux().HandleFunc(protocol.Generic.Ping, func(conn redcon.Conn, cmd redcon.Command) {\n\t\t\tconn.WriteBulkString(\"pong\")\n\t\t})\n\t\taddr := 
net.JoinHostPort(srv.config.BindAddr, strconv.Itoa(srv.config.BindPort))\n\t\tservers[addr] = srv\n\t}\n\n\tc := config.NewClient()\n\trequire.NoError(t, c.Sanitize())\n\n\tcs := NewClient(c)\n\n\tfor addr, srv := range servers {\n\t\t<-srv.StartedCtx.Done()\n\t\tcs.Get(addr)\n\t}\n\t// All the servers have been started.\n\n\tclients := make(map[string]struct{})\n\tfor i := 0; i < 100; i++ {\n\t\trc, err := cs.Pick()\n\t\trequire.NoError(t, err)\n\n\t\tctx := context.Background()\n\t\tcmd := protocol.NewPing().Command(ctx)\n\t\terr = rc.Process(ctx, cmd)\n\t\trequire.NoError(t, err)\n\n\t\tresult, err := cmd.Result()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, \"pong\", result)\n\t\tclients[rc.String()] = struct{}{}\n\t}\n\trequire.Greater(t, len(clients), 1)\n}\n\nfunc TestServer_Client_Close(t *testing.T) {\n\tsrv := newServer(t)\n\n\t<-srv.StartedCtx.Done()\n\n\tc := config.NewClient()\n\trequire.NoError(t, c.Sanitize())\n\n\taddr := net.JoinHostPort(srv.config.BindAddr, strconv.Itoa(srv.config.BindPort))\n\tcs := NewClient(c)\n\trc1 := cs.Get(addr)\n\n\trequire.NoError(t, cs.Close(addr))\n\trc2 := cs.Get(addr)\n\trequire.NotEqual(t, rc1, rc2)\n\trequire.Len(t, cs.clients, 1)\n\trequire.Equal(t, 1, cs.roundRobin.Length())\n}\n\nfunc TestServer_Client_Shutdown(t *testing.T) {\n\tservers := make(map[string]*Server)\n\tfor i := 0; i < 10; i++ {\n\t\tsrv := newServer(t)\n\t\tsrv.ServeMux().HandleFunc(protocol.Generic.Ping, func(conn redcon.Conn, cmd redcon.Command) {\n\t\t\tconn.WriteBulkString(\"pong\")\n\t\t})\n\t\taddr := net.JoinHostPort(srv.config.BindAddr, strconv.Itoa(srv.config.BindPort))\n\t\tservers[addr] = srv\n\t}\n\n\tc := config.NewClient()\n\trequire.NoError(t, c.Sanitize())\n\n\tcs := NewClient(c)\n\n\tfor addr, srv := range servers {\n\t\t<-srv.StartedCtx.Done()\n\t\tcs.Get(addr)\n\t}\n\t// All the servers have been started.\n\terr := cs.Shutdown(context.Background())\n\trequire.NoError(t, err)\n\trequire.Empty(t, 
cs.clients)\n\trequire.Equal(t, 0, cs.roundRobin.Length())\n}\n"
  },
  {
    "path": "internal/server/handler.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage server\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/olric-data/olric/internal/util\"\n\t\"github.com/tidwall/redcon\"\n)\n\ntype ServeMuxWrapper struct {\n\tmux     *ServeMux\n\tprecond func(conn redcon.Conn, cmd redcon.Command) bool\n}\n\n// The HandlerFunc type is an adapter to allow the use of\n// ordinary functions as RESP handlers. 
If f is a function\n// with the appropriate signature, HandlerFunc(f) is a\n// Handler that calls f.\ntype HandlerFunc func(conn redcon.Conn, cmd redcon.Command)\n\ntype Handler struct {\n\thandler      func(conn redcon.Conn, cmd redcon.Command)\n\tprecondition func(conn redcon.Conn, cmd redcon.Command) bool\n}\n\n// ServeRESP calls f(w, r)\nfunc (h Handler) ServeRESP(conn redcon.Conn, cmd redcon.Command) {\n\tCommandsTotal.Increase(1)\n\n\tif len(cmd.Args) == 0 {\n\t\t// A client may form a bad message, prevent panicking.\n\t\th.handler(conn, cmd)\n\t\treturn\n\t}\n\tcommand := util.BytesToString(cmd.Args[0])\n\tif command == \"pubsub\" || command == \"PUBSUB\" {\n\t\tcommand = fmt.Sprintf(\"%s %s\", command, util.BytesToString(cmd.Args[1]))\n\t}\n\n\t// Do not call precondition function for the following commands:\n\t// * Internal.UpdateRouting\n\t// * Generic.Auth\n\tif command == protocol.Internal.UpdateRouting || command == protocol.Generic.Auth {\n\t\th.handler(conn, cmd)\n\t\treturn\n\t}\n\n\tif h.precondition == nil {\n\t\t// No precondition\n\t\th.handler(conn, cmd)\n\t\treturn\n\t}\n\n\tif h.precondition(conn, cmd) {\n\t\th.handler(conn, cmd)\n\t}\n}\n\n// HandleFunc registers the handler function for the given command.\nfunc (m *ServeMuxWrapper) HandleFunc(command string, handler func(conn redcon.Conn, cmd redcon.Command)) {\n\tif handler == nil {\n\t\tpanic(\"server: nil handler\")\n\t}\n\tm.mux.Handle(command, Handler{\n\t\thandler:      handler,\n\t\tprecondition: m.precond,\n\t})\n}\n"
  },
  {
    "path": "internal/server/handler_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage server\n\nimport (\n\t\"context\"\n\t\"crypto/rand\"\n\t\"sync/atomic\"\n\t\"testing\"\n\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/redis/go-redis/v9\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/tidwall/redcon\"\n)\n\nfunc respEcho(t *testing.T, s *Server) {\n\tdata := make([]byte, 8)\n\t_, err := rand.Read(data)\n\trequire.NoError(t, err)\n\n\ts.ServeMux().HandleFunc(protocol.DMap.Get, func(conn redcon.Conn, cmd redcon.Command) {\n\t\tconn.WriteBulk(data)\n\t})\n\n\t<-s.StartedCtx.Done()\n\n\trdb := redis.NewClient(defaultRedisOptions(s.config))\n\n\tctx := context.Background()\n\tcmd := protocol.NewGet(\"mydmap\", \"mykey\").Command(ctx)\n\terr = rdb.Process(ctx, cmd)\n\trequire.NoError(t, err)\n\n\tresult, err := cmd.Bytes()\n\trequire.NoError(t, err)\n\trequire.Equal(t, data, result)\n}\n\nfunc TestHandler_ServeRESP_PreCondition(t *testing.T) {\n\tvar precond int32\n\ts := newServerWithPreConditionFunc(t, func(conn redcon.Conn, cmd redcon.Command) bool {\n\t\tatomic.AddInt32(&precond, 1)\n\t\treturn true\n\t})\n\n\tdefer func() {\n\t\trequire.NoError(t, s.Shutdown(context.Background()))\n\t}()\n\n\trespEcho(t, s)\n\trequire.Equal(t, int32(1), atomic.LoadInt32(&precond))\n}\n\nfunc TestHandler_ServeRESP_PreCondition_DontCheck(t *testing.T) {\n\tvar precond int32\n\ts := 
newServerWithPreConditionFunc(t, func(conn redcon.Conn, cmd redcon.Command) bool {\n\t\tatomic.AddInt32(&precond, 1)\n\t\treturn true\n\t})\n\n\tdefer func() {\n\t\trequire.NoError(t, s.Shutdown(context.Background()))\n\t}()\n\n\tdata := make([]byte, 8)\n\t_, err := rand.Read(data)\n\trequire.NoError(t, err)\n\n\t// The node is bootstrapped by UpdateRoutingCmd. Don't check any preconditions to run that command.\n\ts.ServeMux().HandleFunc(protocol.Internal.UpdateRouting, func(conn redcon.Conn, cmd redcon.Command) {\n\t\tconn.WriteBulk(data)\n\t})\n\n\t<-s.StartedCtx.Done()\n\n\trdb := redis.NewClient(defaultRedisOptions(s.config))\n\n\tctx := context.Background()\n\tcmd := protocol.NewUpdateRouting([]byte(\"dummy-data\"), 1).Command(ctx)\n\terr = rdb.Process(ctx, cmd)\n\trequire.NoError(t, err)\n\n\tresult, err := cmd.Bytes()\n\trequire.NoError(t, err)\n\trequire.Equal(t, data, result)\n\n\trequire.Equal(t, int32(0), atomic.LoadInt32(&precond))\n}\n"
  },
  {
    "path": "internal/server/mux.go",
    "content": "// The MIT License (MIT)\n//\n// Copyright (c) 2016 Josh Baker\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy of\n// this software and associated documentation files (the \"Software\"), to deal in\n// the Software without restriction, including without limitation the rights to\n// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n// the Software, and to permit persons to whom the Software is furnished to do so,\n// subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in all\n// copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\npackage server\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/olric-data/olric/internal/util\"\n\t\"github.com/tidwall/redcon\"\n)\n\n// errAuthRequired represents an error indicating that authentication is required to access the requested resource or operation.\nvar errAuthRequired = errors.New(\"Authentication required.\")\n\n// ServeMux is an RESP command multiplexer.\ntype ServeMux struct {\n\tconfig   *Config\n\thandlers map[string]redcon.Handler\n}\n\n// NewServeMux allocates and returns a new ServeMux.\nfunc NewServeMux(c *Config) *ServeMux {\n\tprotocol.SetError(\"NOAUTH\", errAuthRequired)\n\treturn &ServeMux{\n\t\tconfig:   c,\n\t\thandlers: make(map[string]redcon.Handler),\n\t}\n}\n\n// HandleFunc registers the handler function 
for the given command.\nfunc (m *ServeMux) HandleFunc(command string, handler redcon.Handler) {\n\tif handler == nil {\n\t\tpanic(\"olric: nil handler\")\n\t}\n\tm.Handle(command, handler)\n}\n\n// Handle registers the handler for the given command.\n// If a handler already exists for command, Handle panics.\nfunc (m *ServeMux) Handle(command string, handler redcon.Handler) {\n\tif command == \"\" {\n\t\tpanic(\"olric: invalid command\")\n\t}\n\tif handler == nil {\n\t\tpanic(\"olric: nil handler\")\n\t}\n\tif _, exist := m.handlers[command]; exist {\n\t\tpanic(\"olric: multiple registrations for \" + command)\n\t}\n\n\tm.handlers[command] = handler\n}\n\n// ServeRESP dispatches the command to the handler.\nfunc (m *ServeMux) ServeRESP(conn redcon.Conn, cmd redcon.Command) {\n\tcommand := strings.ToLower(util.BytesToString(cmd.Args[0]))\n\n\tif m.config.RequireAuth && command != protocol.Generic.Auth {\n\t\tctx := conn.Context().(*ConnContext)\n\t\tif !ctx.IsAuthenticated() {\n\t\t\tprotocol.WriteError(conn, errAuthRequired)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif handler, ok := m.handlers[command]; ok {\n\t\thandler.ServeRESP(conn, cmd)\n\t\treturn\n\t}\n\n\tif command == protocol.PubSub.PubSub {\n\t\tif len(cmd.Args) < 2 {\n\t\t\tprotocol.WriteError(conn, fmt.Errorf(\"wrong number of arguments for '%s' command\", command))\n\t\t\treturn\n\t\t}\n\t\tcommand = fmt.Sprintf(\"%s %s\", command, util.BytesToString(cmd.Args[1]))\n\t}\n\n\tif handler, ok := m.handlers[command]; ok {\n\t\thandler.ServeRESP(conn, cmd)\n\t\treturn\n\t}\n\n\tprotocol.WriteError(conn, fmt.Errorf(\"unknown command '%s'\", command))\n}\n"
  },
  {
    "path": "internal/server/mux_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage server\n\nimport (\n\t\"context\"\n\t\"math/rand\"\n\t\"testing\"\n\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/redis/go-redis/v9\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/tidwall/redcon\"\n)\n\nfunc TestMux_PubSub_Command(t *testing.T) {\n\ts := newServer(t)\n\n\tdata := make([]byte, 8)\n\t_, err := rand.Read(data)\n\trequire.NoError(t, err)\n\n\ts.ServeMux().HandleFunc(protocol.PubSub.PubSubNumpat, func(conn redcon.Conn, cmd redcon.Command) {\n\t\tconn.WriteInt(10)\n\t})\n\n\t<-s.StartedCtx.Done()\n\n\trdb := redis.NewClient(defaultRedisOptions(s.config))\n\n\tctx := context.Background()\n\tvar args []interface{}\n\targs = append(args, \"pubsub\")\n\targs = append(args, \"numpat\")\n\tcmd := redis.NewIntCmd(ctx, args...)\n\terr = rdb.Process(ctx, cmd)\n\trequire.NoError(t, err)\n\n\tnum, err := cmd.Result()\n\trequire.NoError(t, err)\n\trequire.Equal(t, int64(10), num)\n}\n"
  },
  {
    "path": "internal/server/server.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage server\n\nimport (\n\t\"context\"\n\t\"net\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/checkpoint\"\n\t\"github.com/olric-data/olric/internal/stats\"\n\t\"github.com/olric-data/olric/pkg/flog\"\n\t\"github.com/tidwall/redcon\"\n)\n\nvar (\n\t// CommandsTotal is the total number of all requests broken down by command (get, put, etc.) 
and status.\n\tCommandsTotal = stats.NewInt64Counter()\n\n\t// ConnectionsTotal is the total number of connections opened since the server started running.\n\tConnectionsTotal = stats.NewInt64Counter()\n\n\t// CurrentConnections is the current number of open connections.\n\tCurrentConnections = stats.NewInt64Gauge()\n\n\t// WrittenBytesTotal is the total number of bytes sent by this server to network.\n\tWrittenBytesTotal = stats.NewInt64Counter()\n\n\t// ReadBytesTotal is the total number of bytes read by this server from network.\n\tReadBytesTotal = stats.NewInt64Counter()\n)\n\n// Config is a composite type to bundle configuration parameters.\ntype Config struct {\n\tBindAddr        string\n\tBindPort        int\n\tKeepAlivePeriod time.Duration\n\tIdleClose       time.Duration\n\tRequireAuth     bool\n}\n\n// ConnContext represents the context for a connection with authentication state management.\ntype ConnContext struct {\n\tmtx sync.RWMutex\n\n\t// authenticated indicates whether the connection is successfully authenticated.\n\tauthenticated bool\n}\n\n// NewConnContext initializes and returns a new instance of ConnContext for managing connection states like authentication.\nfunc NewConnContext() *ConnContext {\n\treturn &ConnContext{}\n}\n\n// SetAuthenticated sets the authentication state of the connection to the specified value. It is thread-safe.\nfunc (c *ConnContext) SetAuthenticated(authenticated bool) {\n\tc.mtx.Lock()\n\tdefer c.mtx.Unlock()\n\n\tc.authenticated = authenticated\n}\n\n// IsAuthenticated checks if the connection is authenticated. 
It is thread-safe and returns true if authenticated.\nfunc (c *ConnContext) IsAuthenticated() bool {\n\tc.mtx.RLock()\n\tdefer c.mtx.RUnlock()\n\n\treturn c.authenticated\n}\n\n// ConnWrapper is a wrapper around net.Conn that enables tracking of read and written bytes.\ntype ConnWrapper struct {\n\tnet.Conn\n}\n\n// Write sends data over the underlying connection and updates the total written bytes counter.\n// It returns the number of bytes written and any error encountered.\nfunc (cw *ConnWrapper) Write(b []byte) (n int, err error) {\n\tnr, err := cw.Conn.Write(b)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tWrittenBytesTotal.Increase(int64(nr))\n\treturn nr, nil\n}\n\n// Read reads data into the provided byte slice, updates the read bytes counter, and returns the number of bytes read.\nfunc (cw *ConnWrapper) Read(b []byte) (n int, err error) {\n\tnr, err := cw.Conn.Read(b)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tReadBytesTotal.Increase(int64(nr))\n\treturn nr, nil\n}\n\n// ListenerWrapper is a wrapper around net.Listener that supports setting a TCP keep-alive period for accepted connections.\ntype ListenerWrapper struct {\n\tnet.Listener\n\tkeepAlivePeriod time.Duration\n}\n\n// Accept waits for and returns the next connection to the ListenerWrapper, applying TCP keep-alive settings if specified.\nfunc (lw *ListenerWrapper) Accept() (net.Conn, error) {\n\tconn, err := lw.Listener.Accept()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif tcpConn, ok := conn.(*net.TCPConn); ok {\n\t\tif lw.keepAlivePeriod != 0 {\n\t\t\tif keepAliveErr := tcpConn.SetKeepAlive(true); keepAliveErr != nil {\n\t\t\t\treturn nil, keepAliveErr\n\t\t\t}\n\t\t\tif keepAliveErr := tcpConn.SetKeepAlivePeriod(lw.keepAlivePeriod); keepAliveErr != nil {\n\t\t\t\treturn nil, keepAliveErr\n\t\t\t}\n\t\t}\n\t}\n\treturn &ConnWrapper{conn}, nil\n}\n\n// Server is a TCP server struct that manages configurations, logging, and connection handling for RESP-based protocols.\ntype Server 
struct {\n\tconfig     *Config\n\tmux        *ServeMux\n\twmux       *ServeMuxWrapper\n\tserver     *redcon.Server\n\tlog        *flog.Logger\n\tlistener   *ListenerWrapper\n\tStartedCtx context.Context\n\tstarted    context.CancelFunc\n\tctx        context.Context\n\tcancel     context.CancelFunc\n\twg         sync.WaitGroup\n\t// some components of the TCP server should be closed after the listener\n\tstopped chan struct{}\n}\n\n// New initializes and returns a new Server configured with the specified Config and Logger.\nfunc New(c *Config, l *flog.Logger) *Server {\n\t// The server has to be started properly before accepting connections.\n\tcheckpoint.Add()\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tstartedCtx, started := context.WithCancel(context.Background())\n\ts := &Server{\n\t\tconfig:     c,\n\t\tmux:        NewServeMux(c),\n\t\tlog:        l,\n\t\tstarted:    started,\n\t\tStartedCtx: startedCtx,\n\t\tstopped:    make(chan struct{}),\n\t\tctx:        ctx,\n\t\tcancel:     cancel,\n\t}\n\ts.wmux = &ServeMuxWrapper{mux: s.mux}\n\treturn s\n}\n\n// SetPreConditionFunc sets a precondition function to be executed before serving each command on the server.\nfunc (s *Server) SetPreConditionFunc(f func(conn redcon.Conn, cmd redcon.Command) bool) {\n\tselect {\n\tcase <-s.StartedCtx.Done():\n\t\t// It's already started.\n\t\treturn\n\tdefault:\n\t}\n\ts.wmux.precond = f\n}\n\nfunc (s *Server) ServeMux() *ServeMuxWrapper {\n\treturn s.wmux\n}\n\n// ListenAndServe starts the TCP server, initializes internal components, and begins accepting connections.\nfunc (s *Server) ListenAndServe() error {\n\taddr := net.JoinHostPort(s.config.BindAddr, strconv.Itoa(s.config.BindPort))\n\tlistener, err := net.Listen(\"tcp\", addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlw := &ListenerWrapper{\n\t\tListener:        listener,\n\t\tkeepAlivePeriod: s.config.KeepAlivePeriod,\n\t}\n\n\tdefer close(s.stopped)\n\ts.listener = lw\n\n\tsrv := 
redcon.NewServer(addr,\n\t\ts.mux.ServeRESP,\n\t\tfunc(conn redcon.Conn) bool {\n\t\t\tconn.SetContext(NewConnContext())\n\t\t\tConnectionsTotal.Increase(1)\n\t\t\tCurrentConnections.Increase(1)\n\t\t\treturn true\n\t\t},\n\t\tfunc(conn redcon.Conn, err error) {\n\t\t\tCurrentConnections.Increase(-1)\n\t\t},\n\t)\n\n\tif s.config.IdleClose != 0 {\n\t\tsrv.SetIdleClose(s.config.IdleClose)\n\t}\n\ts.server = srv\n\n\t// The TCP server has been started\n\ts.started()\n\tcheckpoint.Pass()\n\treturn s.server.Serve(lw)\n}\n\n// Shutdown gracefully shuts down the server without interrupting any active connections.\n// Shutdown works by first closing all open listeners, then closing all idle connections,\n// and then waiting indefinitely for connections to return to idle and then shut down.\n// If the provided context expires before the shutdown is complete, Shutdown returns\n// the context's error; otherwise it returns any error returned from closing the Server's\n// underlying Listener(s).\nfunc (s *Server) Shutdown(ctx context.Context) error {\n\tselect {\n\tcase <-s.ctx.Done():\n\t\t// It's already closed.\n\t\treturn nil\n\tdefault:\n\t}\n\n\ts.cancel()\n\n\tif s.server == nil {\n\t\t// There is nothing to close.\n\t\treturn nil\n\t}\n\n\tvar latestError error\n\terr := s.server.Close()\n\tif err != nil {\n\t\ts.log.V(2).Printf(\"[ERROR] Failed to close listener: %v\", err)\n\t\tlatestError = err\n\t}\n\n\t// Listener is closed successfully. Now we can await for closing\n\t// other components of the TCP server.\n\t<-s.stopped\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\ts.wg.Wait()\n\t\tclose(done)\n\t}()\n\tselect {\n\tcase <-ctx.Done():\n\t\terr = ctx.Err()\n\t\tif err != nil {\n\t\t\ts.log.V(2).Printf(\"[ERROR] Context has an error: %v\", err)\n\t\t\tlatestError = err\n\t\t}\n\tcase <-done:\n\t}\n\n\treturn latestError\n}\n"
  },
  {
    "path": "internal/server/server_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage server\n\nimport (\n\t\"context\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/pkg/flog\"\n\t\"github.com/redis/go-redis/v9\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/tidwall/redcon\"\n)\n\n// getFreePort copied from testutil package to prevent cycle import.\nfunc getFreePort() (int, error) {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tport := l.Addr().(*net.TCPAddr).Port\n\tif err := l.Close(); err != nil {\n\t\treturn 0, err\n\t}\n\treturn port, nil\n}\n\nfunc newServerWithPreConditionFunc(t *testing.T, precond func(conn redcon.Conn, cmd redcon.Command) bool) *Server {\n\tbindPort, err := getFreePort()\n\tif err != nil {\n\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t}\n\n\tl := log.New(os.Stdout, \"server-test: \", log.LstdFlags)\n\tfl := flog.New(l)\n\tfl.SetLevel(6)\n\tfl.ShowLineNumber(1)\n\tc := &Config{\n\t\tBindAddr:        \"127.0.0.1\",\n\t\tBindPort:        bindPort,\n\t\tKeepAlivePeriod: time.Second,\n\t}\n\ts := New(c, fl)\n\ts.SetPreConditionFunc(precond)\n\n\tgo func() {\n\t\terr := s.ListenAndServe()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Expected nil. 
Got: %v\", err)\n\t\t}\n\t}()\n\n\tt.Cleanup(func() {\n\t\terr = s.Shutdown(context.Background())\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Expected nil. Got: %v\", err)\n\t\t}\n\t})\n\n\treturn s\n}\n\nfunc newServer(t *testing.T) *Server {\n\tsrv := newServerWithPreConditionFunc(t, nil)\n\tt.Cleanup(func() {\n\t\trequire.NoError(t, srv.Shutdown(context.Background()))\n\t})\n\treturn srv\n}\n\nfunc defaultRedisOptions(c *Config) *redis.Options {\n\treturn &redis.Options{\n\t\tAddr: net.JoinHostPort(c.BindAddr, strconv.Itoa(c.BindPort)),\n\t}\n}\n\nfunc TestServer_RESP(t *testing.T) {\n\ts := newServer(t)\n\n\trespEcho(t, s)\n}\n\nfunc TestServer_RESP_Stats(t *testing.T) {\n\ts := newServer(t)\n\n\trespEcho(t, s)\n\n\trequire.NotEqual(t, int64(0), CommandsTotal.Read())\n\trequire.NotEqual(t, int64(0), ConnectionsTotal.Read())\n\trequire.NotEqual(t, int64(0), CurrentConnections.Read())\n\trequire.NotEqual(t, int64(0), WrittenBytesTotal.Read())\n\trequire.NotEqual(t, int64(0), ReadBytesTotal.Read())\n}\n\nfunc TestConnContext_Authentication(t *testing.T) {\n\tctx := NewConnContext()\n\trequire.False(t, ctx.IsAuthenticated())\n\n\tt.Run(\"Authenticated\", func(t *testing.T) {\n\t\tctx.SetAuthenticated(true)\n\t\trequire.True(t, ctx.IsAuthenticated())\n\t})\n}\n"
  },
  {
    "path": "internal/service/service.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage service\n\nimport (\n\t\"context\"\n)\n\ntype Service interface {\n\tStart() error\n\tRegisterHandlers()\n\tShutdown(ctx context.Context) error\n}\n"
  },
  {
    "path": "internal/stats/stats.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage stats\n\nimport \"sync/atomic\"\n\n// Int64Counter is a cumulative metric that represents a single monotonically\n// increasing counter whose value can only increase or be reset to zero on restart.\ntype Int64Counter struct {\n\tcounter int64\n}\n\n// NewInt64Counter returns a new Int64Counter\nfunc NewInt64Counter() *Int64Counter {\n\treturn &Int64Counter{}\n}\n\n// Increase increases the counter by delta.\nfunc (c *Int64Counter) Increase(delta int64) {\n\tatomic.AddInt64(&c.counter, delta)\n}\n\n// Read returns the current value of counter.\nfunc (c *Int64Counter) Read() int64 {\n\treturn atomic.LoadInt64(&c.counter)\n}\n\n// Reset sets zero to the underlying counter.\nfunc (c *Int64Counter) Reset() {\n\tatomic.StoreInt64(&c.counter, 0)\n}\n\n// Int64Gauge is a metric that represents a single numerical value that can\n// arbitrarily go up and down.\ntype Int64Gauge struct {\n\tgauge int64\n}\n\n// NewInt64Gauge returns a new Int64Gauge\nfunc NewInt64Gauge() *Int64Gauge {\n\treturn &Int64Gauge{}\n}\n\n// Increase increases the gauge by delta.\nfunc (c *Int64Gauge) Increase(delta int64) {\n\tatomic.AddInt64(&c.gauge, delta)\n}\n\n// Decrease decreases the gauge by delta.\nfunc (c *Int64Gauge) Decrease(delta int64) {\n\tatomic.AddInt64(&c.gauge, -1*delta)\n}\n\n// Read returns the current value of gauge.\nfunc (c *Int64Gauge) Read() 
int64 {\n\treturn atomic.LoadInt64(&c.gauge)\n}\n\n// Reset sets zero to the underlying gauge.\nfunc (c *Int64Gauge) Reset() {\n\tatomic.StoreInt64(&c.gauge, 0)\n}\n"
  },
  {
    "path": "internal/stats/stats_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage stats\n\nimport (\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestUint64Counter(t *testing.T) {\n\tc := NewInt64Counter()\n\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < 100; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tc.Increase(1)\n\t\t}()\n\t}\n\n\twg.Wait()\n\n\trequire.Equal(t, int64(100), c.Read())\n}\n\nfunc TestUint64Gauge(t *testing.T) {\n\tg := NewInt64Gauge()\n\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < 100; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tg.Increase(1)\n\t\t}()\n\t}\n\n\tfor i := 0; i < 20; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tg.Decrease(1)\n\t\t}()\n\t}\n\n\twg.Wait()\n\n\trequire.Equal(t, int64(80), g.Read())\n}\n"
  },
  {
    "path": "internal/testcluster/testcluster.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage testcluster\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net\"\n\t\"strconv\"\n\t\"sync\"\n\n\t\"github.com/olric-data/olric/config\"\n\t\"github.com/olric-data/olric/internal/cluster/balancer\"\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/cluster/routingtable\"\n\t\"github.com/olric-data/olric/internal/environment\"\n\t\"github.com/olric-data/olric/internal/locker\"\n\t\"github.com/olric-data/olric/internal/server\"\n\t\"github.com/olric-data/olric/internal/service\"\n\t\"github.com/olric-data/olric/internal/testutil\"\n\t\"golang.org/x/sync/errgroup\"\n)\n\ntype TestCluster struct {\n\tmu sync.Mutex\n\n\tenvironments []*environment.Environment\n\tmemberPorts  []int\n\tconstructor  func(e *environment.Environment) (service.Service, error)\n\terrGr        errgroup.Group\n\tctx          context.Context\n\tcancel       context.CancelFunc\n}\n\nfunc NewEnvironment(c *config.Config) *environment.Environment {\n\tif c == nil {\n\t\tc = testutil.NewConfig()\n\t}\n\n\te := environment.New()\n\te.Set(\"config\", c)\n\te.Set(\"logger\", testutil.NewFlogger(c))\n\te.Set(\"client\", server.NewClient(c.Client))\n\te.Set(\"primary\", partitions.New(c.PartitionCount, partitions.PRIMARY))\n\te.Set(\"backup\", partitions.New(c.PartitionCount, partitions.BACKUP))\n\te.Set(\"locker\", 
locker.New())\n\te.Set(\"server\", testutil.NewServer(c))\n\treturn e\n}\n\nfunc (t *TestCluster) newService(e *environment.Environment) service.Service {\n\trt := routingtable.New(e)\n\te.Set(\"routingtable\", rt)\n\n\tb := balancer.New(e)\n\te.Set(\"balancer\", b)\n\tt.errGr.Go(func() error {\n\t\t<-t.ctx.Done()\n\t\treturn b.Shutdown(context.Background())\n\t})\n\n\tsrv := e.Get(\"server\").(*server.Server)\n\tgo func() {\n\t\terr := srv.ListenAndServe()\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"ListenAndServe returned an error: %v\", err))\n\t\t}\n\t}()\n\tt.errGr.Go(func() error {\n\t\t<-t.ctx.Done()\n\t\treturn srv.Shutdown(context.Background())\n\t})\n\t<-srv.StartedCtx.Done()\n\n\ts, err := t.constructor(e)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to start DMap service: %v\", err))\n\t}\n\treturn s\n}\n\nfunc New(constructor func(e *environment.Environment) (service.Service, error)) *TestCluster {\n\tctx, cancel := context.WithCancel(context.Background())\n\treturn &TestCluster{\n\t\tconstructor: constructor,\n\t\tctx:         ctx,\n\t\tcancel:      cancel,\n\t}\n}\n\nfunc (t *TestCluster) syncCluster() {\n\t// Update routing table on the cluster before running balancer\n\tfor _, e := range t.environments {\n\t\trt := e.Get(\"routingtable\").(*routingtable.RoutingTable)\n\t\tif rt.Discovery().IsCoordinator() {\n\t\t\t// The coordinator pushes the routing table immediately.\n\t\t\t// Normally, this is triggered by every cluster event but we don't want to\n\t\t\t// do this asynchronously to avoid randomness in tests.\n\t\t\trt.UpdateEagerly()\n\t\t}\n\t}\n\t// Normally, balancer is triggered by routing table after a successful update, but we don't want to\n\t// balance the test cluster asynchronously. 
So we balance the partitions here explicitly.\n\tfor _, e := range t.environments {\n\t\te.Get(\"balancer\").(*balancer.Balancer).BalanceEagerly()\n\t}\n}\n\nfunc (t *TestCluster) AddMember(e *environment.Environment) service.Service {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\tif e == nil {\n\t\te = NewEnvironment(nil)\n\t}\n\tc := e.Get(\"config\").(*config.Config)\n\tpartitions.SetHashFunc(c.Hasher)\n\n\tport, err := testutil.GetFreePort()\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to a random port: %v\", err))\n\t}\n\tc.MemberlistConfig.BindPort = port\n\n\tvar peers []string\n\tfor _, peerPort := range t.memberPorts {\n\t\tpeers = append(peers, net.JoinHostPort(\"127.0.0.1\", strconv.Itoa(peerPort)))\n\t}\n\tc.Peers = peers\n\n\ts := t.newService(e)\n\trt := e.Get(\"routingtable\").(*routingtable.RoutingTable)\n\terr = rt.Join()\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to join the Olric cluster: %v\", err))\n\t}\n\terr = rt.Start()\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to start the routing table: %v\", err))\n\t}\n\n\tt.errGr.Go(func() error {\n\t\t<-t.ctx.Done()\n\t\treturn rt.Shutdown(context.Background())\n\t})\n\n\tt.errGr.Go(func() error {\n\t\treturn s.Start()\n\t})\n\n\tt.errGr.Go(func() error {\n\t\t<-t.ctx.Done()\n\t\treturn s.Shutdown(context.Background())\n\t})\n\n\tt.environments = append(t.environments, e)\n\tt.memberPorts = append(t.memberPorts, port)\n\tt.syncCluster()\n\treturn s\n}\n\nfunc (t *TestCluster) Shutdown() {\n\tt.cancel()\n\terr := t.errGr.Wait()\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to shutdown the cluster: %v\", err))\n\t}\n}\n"
  },
  {
    "path": "internal/testutil/mockfragment/mockfragment.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage mockfragment\n\nimport (\n\t\"crypto/rand\"\n\t\"fmt\"\n\tmrand \"math/rand\"\n\t\"sync\"\n\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/discovery\"\n\t\"github.com/olric-data/olric/pkg/storage\"\n)\n\ntype Result struct {\n\tName   string\n\tOwners []discovery.Member\n}\n\ntype MockFragment struct {\n\tsync.RWMutex\n\tm      map[string]interface{}\n\tresult map[partitions.Kind]map[uint64]Result\n}\n\nfunc New() *MockFragment {\n\treturn &MockFragment{\n\t\tm:      make(map[string]interface{}),\n\t\tresult: make(map[partitions.Kind]map[uint64]Result),\n\t}\n}\n\nfunc (f *MockFragment) Stats() storage.Stats {\n\tf.Lock()\n\tdefer f.Unlock()\n\treturn storage.Stats{\n\t\tLength: len(f.m),\n\t}\n}\n\nfunc (f *MockFragment) Name() string {\n\treturn \"Mock-DMap\"\n}\n\nfunc (f *MockFragment) Put(key string, value interface{}) {\n\tf.Lock()\n\tdefer f.Unlock()\n\tf.m[key] = value\n}\n\nfunc (f *MockFragment) Get(key string) interface{} {\n\tf.Lock()\n\tdefer f.Unlock()\n\treturn f.m[key]\n}\n\nfunc (f *MockFragment) Delete(key string) {\n\tf.Lock()\n\tdefer f.Unlock()\n\tdelete(f.m, key)\n}\n\nfunc (f *MockFragment) Fill() {\n\tn := 5\n\tb := make([]byte, n)\n\trandKey := func() string {\n\t\tif _, err := rand.Read(b); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn 
fmt.Sprintf(\"%X\", b)\n\t}\n\tnum := mrand.Intn(100)\n\tfor i := 0; i < num; i++ {\n\t\tf.Put(randKey(), i)\n\t}\n}\n\nfunc (f *MockFragment) Result() map[partitions.Kind]map[uint64]Result {\n\treturn f.result\n}\n\nfunc (f *MockFragment) Move(part *partitions.Partition, name string, owners []discovery.Member) error {\n\tf.Lock()\n\tdefer f.Unlock()\n\n\tf.result[part.Kind()] = map[uint64]Result{\n\t\tpart.ID(): {\n\t\t\tName:   name,\n\t\t\tOwners: owners,\n\t\t},\n\t}\n\n\tfor key := range f.m {\n\t\tdelete(f.m, key)\n\t}\n\n\treturn nil\n}\n\nfunc (f *MockFragment) Compaction() (bool, error) {\n\treturn false, nil\n}\n\nfunc (f *MockFragment) Destroy() error {\n\tf.Lock()\n\tdefer f.Unlock()\n\tf.m = make(map[string]interface{})\n\treturn nil\n}\n\nfunc (f *MockFragment) Close() error {\n\treturn nil\n}\n\nvar _ partitions.Fragment = (*MockFragment)(nil)\n"
  },
  {
    "path": "internal/testutil/testutil.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage testutil\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/hashicorp/memberlist\"\n\t\"github.com/olric-data/olric/config\"\n\t\"github.com/olric-data/olric/internal/server\"\n\t\"github.com/olric-data/olric/pkg/flog\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc GetFreePort() (int, error) {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tport := l.Addr().(*net.TCPAddr).Port\n\tif err := l.Close(); err != nil {\n\t\treturn 0, err\n\t}\n\treturn port, nil\n}\n\nfunc NewFlogger(c *config.Config) *flog.Logger {\n\tflogger := flog.New(c.Logger)\n\tflogger.SetLevel(c.LogVerbosity)\n\tif c.LogLevel == \"DEBUG\" {\n\t\tflogger.ShowLineNumber(1)\n\t}\n\treturn flogger\n}\n\nfunc NewEngineConfig(t *testing.T) *config.Engine {\n\te := config.NewEngine()\n\trequire.NoError(t, e.Sanitize())\n\trequire.NoError(t, e.Validate())\n\treturn e\n}\n\nfunc NewConfig() *config.Config {\n\tc := config.New(\"local\")\n\tc.PartitionCount = 7\n\tmc := memberlist.DefaultLocalConfig()\n\tmc.BindAddr = \"127.0.0.1\"\n\tmc.BindPort = 0\n\tc.MemberlistConfig = mc\n\n\tport, err := GetFreePort()\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"GetFreePort returned an error: 
%v\", err))\n\t}\n\tc.BindAddr = \"127.0.0.1\"\n\tc.BindPort = port\n\tc.MemberlistConfig.Name = net.JoinHostPort(c.BindAddr, strconv.Itoa(c.BindPort))\n\tc.LeaveTimeout = 500 * time.Millisecond\n\tif err := c.Sanitize(); err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to sanitize default config: %v\", err))\n\t}\n\treturn c\n}\n\nfunc NewServer(c *config.Config) *server.Server {\n\tsc := &server.Config{\n\t\tBindAddr:        c.BindAddr,\n\t\tBindPort:        c.BindPort,\n\t\tKeepAlivePeriod: time.Second,\n\t}\n\tl := NewFlogger(c)\n\treturn server.New(sc, l)\n}\n\nfunc TryWithInterval(max int, interval time.Duration, f func() error) error {\n\tticker := time.NewTicker(interval)\n\tdefer ticker.Stop()\n\n\tvar err error\n\terr = f()\n\tif err == nil {\n\t\t// Done. No need to try with interval\n\t\treturn nil\n\t}\n\n\tvar count = 1\n\tfor count < max {\n\t\t<-ticker.C\n\t\tcount++\n\t\terr = f()\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn err\n}\n\nfunc ToKey(i int) string {\n\treturn fmt.Sprintf(\"%09d\", i)\n}\n\nfunc ToVal(i int) []byte {\n\treturn []byte(fmt.Sprintf(\"%010d\", i))\n}\n"
  },
  {
    "path": "internal/util/safe.go",
    "content": "// Copyright (c) 2013 The github.com/go-redis/redis Authors.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n\n// * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n// * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n//go:build appengine\n// +build appengine\n\npackage util\n\nfunc BytesToString(b []byte) string {\n\treturn string(b)\n}\n\nfunc StringToBytes(s string) []byte {\n\treturn []byte(s)\n}\n"
  },
  {
    "path": "internal/util/strconv.go",
    "content": "// Copyright (c) 2013 The github.com/go-redis/redis Authors.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n\n// * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n// * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage util\n\nimport \"strconv\"\n\nfunc Atoi(b []byte) (int, error) {\n\treturn strconv.Atoi(BytesToString(b))\n}\n\nfunc ParseInt(b []byte, base int, bitSize int) (int64, error) {\n\treturn strconv.ParseInt(BytesToString(b), base, bitSize)\n}\n\nfunc ParseUint(b []byte, base int, bitSize int) (uint64, error) {\n\treturn strconv.ParseUint(BytesToString(b), base, bitSize)\n}\n\nfunc ParseFloat(b []byte, bitSize int) (float64, error) {\n\treturn strconv.ParseFloat(BytesToString(b), bitSize)\n}\n"
  },
  {
    "path": "internal/util/unsafe.go",
    "content": "// Copyright (c) 2013 The github.com/go-redis/redis Authors.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n\n// * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n// * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n//go:build !appengine\n// +build !appengine\n\npackage util\n\nimport (\n\t\"unsafe\"\n)\n\n// BytesToString converts byte slice to string.\nfunc BytesToString(b []byte) string {\n\treturn *(*string)(unsafe.Pointer(&b))\n}\n\n// StringToBytes converts string to byte slice.\nfunc StringToBytes(s string) []byte {\n\treturn *(*[]byte)(unsafe.Pointer(\n\t\t&struct {\n\t\t\tstring\n\t\t\tCap int\n\t\t}{s, len(s)},\n\t))\n}\n"
  },
  {
    "path": "olric-server-docker.yaml",
    "content": "server:\n  # BindAddr denotes the address that Olric will bind to for communication\n  # with other Olric nodes.\n  bindAddr: 0.0.0.0\n\n  # BindPort denotes the port that Olric will bind to for communication\n  # with other Olric nodes.\n  bindPort: 3320\n\n  # KeepAlivePeriod denotes whether the operating system should send\n  # keep-alive messages on the connection.\n  keepAlivePeriod: 300s\n\n  # IdleClose will automatically close idle connections after the specified duration.\n  # Use zero to disable this feature.\n  # idleClose: 300s\n\n  # Timeout for bootstrap control\n  #\n  # An Olric node checks operation status before taking any action for the\n  # cluster events, responding incoming requests and running API functions.\n  # Bootstrapping status is one of the most important checkpoints for an\n  # \"operable\" Olric node. BootstrapTimeout sets a deadline to check\n  # bootstrapping status without blocking indefinitely.\n  bootstrapTimeout: 5s\n\n  # PartitionCount is 271, by default.\n  partitionCount: 271\n\n  # ReplicaCount is 1, by default.\n  replicaCount: 1\n\n  # Minimum number of successful writes to return a response for a write request.\n  writeQuorum: 1\n\n  # Minimum number of successful reads to return a response for a read request.\n  readQuorum: 1\n\n  # Switch to control read-repair algorithm which helps to reduce entropy.\n  readRepair: false\n\n  # Default value is SyncReplicationMode.\n  replicationMode: 0 # sync mode. for async, set 1\n\n  # Minimum number of members to form a cluster and run any query on the cluster.\n  memberCountQuorum: 1\n\n  # Coordinator member pushes the routing table to cluster members in the case of\n  # node join or left events. It also pushes the table periodically. routingTablePushInterval\n  # is the interval between subsequent calls. Default is 1 minute.\n  routingTablePushInterval: 1m\n\n  # Olric can send push cluster events to cluster.events channel. 
Available cluster events:\n  #\n  # * node-join-event\n  # * node-left-event\n  # * fragment-migration-event\n  # * fragment-received-event\n  #\n  # If you want to receive these events, set true to EnableClusterEventsChannel and subscribe to\n  # cluster.events channel. Default is false.\n  enableClusterEventsChannel: true\n\n#authentication:\n#  password: \"your-password\"\n\nclient:\n  # Timeout for TCP dial.\n  #\n  # The timeout includes name resolution, if required. When using TCP, and the host in the address parameter\n  # resolves to multiple IP addresses, the timeout is spread over each consecutive dial, such that each is\n  # given an appropriate fraction of the time to connect.\n  dialTimeout: 5s\n\n  # Timeout for socket reads. If reached, commands will fail\n  # with a timeout instead of blocking. Use value -1 for no timeout and 0 for default.\n  # Default is DefaultReadTimeout\n  readTimeout: 3s\n\n  # Timeout for socket writes. If reached, commands will fail\n  # with a timeout instead of blocking.\n  # Default is DefaultWriteTimeout\n  writeTimeout: 3s\n\n  # Maximum number of retries before giving up.\n  # Default is 3 retries; -1 (not 0) disables retries.\n  #maxRetries: 3\n\n  # Minimum backoff between each retry.\n  # Default is 8 milliseconds; -1 disables backoff.\n  #minRetryBackoff: 8ms\n\n  # Maximum backoff between each retry.\n  # Default is 512 milliseconds; -1 disables backoff.\n  #maxRetryBackoff: 512ms\n\n  # Type of connection pool.\n  # true for FIFO pool, false for LIFO pool.\n  # Note that fifo has higher overhead compared to lifo.\n  #poolFIFO: false\n\n  # Maximum number of socket connections.\n  # Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS.\n  #poolSize: 0\n\n  # Minimum number of idle connections which is useful when establishing\n  # new connection is slow.\n  #minIdleConns:\n\n  # Connection age at which client retires (closes) the connection.\n  # Default is to not close aged 
connections.\n  #maxConnAge:\n\n  # Amount of time client waits for connection if all connections are busy before\n  # returning an error. Default is ReadTimeout + 1 second.\n  #poolTimeout: 3s\n\n  # Amount of time after which client closes idle connections.\n  # Should be less than server's timeout.\n  # Default is 5 minutes. -1 disables idle timeout check.\n  idleTimeout: 5m\n\n  # Frequency of idle checks made by idle connections reaper.\n  # Default is 1 minute. -1 disables idle connections reaper,\n  # but idle connections are still discarded by the client\n  # if IdleTimeout is set.\n  idleCheckFrequency: 1m\n\n\nlogging:\n  # DefaultLogVerbosity denotes default log verbosity level.\n  #\n  # * 1 - Generally useful for this to ALWAYS be visible to an operator\n  #   * Programmer errors\n  #   * Logging extra info about a panic\n  #   * CLI argument handling\n  # * 2 - A reasonable default log level if you don't want verbosity.\n  #   * Information about config (listening on X, watching Y)\n  #   * Errors that repeat frequently that relate to conditions that can be\n  #     corrected\n  # * 3 - Useful steady state information about the service and\n  #     important log messages that may correlate to\n  #   significant changes in the system.  This is the recommended default log\n  #     level for most systems.\n  #   * Logging HTTP requests and their exit code\n  #   * System state changing\n  #   * Controller state change events\n  #   * Scheduler log messages\n  # * 4 - Extended information about changes\n  #   * More info about system state changes\n  # * 5 - Debug level verbosity\n  #   * Logging in particularly thorny parts of code where you may want to come\n  #     back later and check it\n  # * 6 - Trace level verbosity\n  #   * Context to understand the steps leading up to neterrors and warnings\n  #   * More information for troubleshooting reported issues\n  verbosity: 3\n\n  # Default LogLevel is DEBUG. 
Available levels: \"DEBUG\", \"WARN\", \"ERROR\", \"INFO\"\n  level: WARN\n  output: stderr\n\nmemberlist:\n  environment: lan\n\n  # Configuration related to what address to bind to and ports to\n  # listen on. The port is used for both UDP and TCP gossip. It is\n  # assumed other nodes are running on this port, but they do not need\n  # to.\n  bindAddr: 0.0.0.0\n  bindPort: 3322\n\n  # EnableCompression is used to control message compression. This can\n  # be used to reduce bandwidth usage at the cost of slightly more CPU\n  # utilization. This is only available starting at protocol version 1.\n  enableCompression: false\n\n  # JoinRetryInterval is the time gap between attempts to join an existing\n  # cluster.\n  joinRetryInterval: 1ms\n\n  # MaxJoinAttempts denotes the maximum number of attemps to join an existing\n  # cluster before forming a new one.\n  maxJoinAttempts: 1\n\n  # See service discovery plugins\n  #peers:\n  #  - \"localhost:3325\"\n\n  #advertiseAddr: \"\"\n  #advertisePort: 3322\n  #suspicionMaxTimeoutMult: 6\n  #disableTCPPings: false\n  #awarenessMaxMultiplier: 8\n  #gossipNodes: 3\n  #gossipVerifyIncoming: true\n  #gossipVerifyOutgoing: true\n  #dnsConfigPath: \"/etc/resolv.conf\"\n  #handoffQueueDepth: 1024\n  #udpBufferSize: 1400\n\ndmaps:\n  engine:\n    name: ramblock\n    config:\n      tableSize: 524288 # bytes\n#  checkEmptyFragmentsInterval: 1m\n#  triggerCompactionInterval: 10m\n#  numEvictionWorkers: 1\n#  maxIdleDuration: \"\"\n#  ttlDuration: \"100s\"\n#  maxKeys: 100000\n#  maxInuse: 1000000\n#  lRUSamples: 10\n#  evictionPolicy: \"LRU\"\n#  custom:\n#   foobar:\n#      maxIdleDuration: \"60s\"\n#      ttlDuration: \"300s\"\n#      maxKeys: 500000\n#      lRUSamples: 20\n#      evictionPolicy: \"NONE\"\n\n\n#serviceDiscovery:\n#  # path is a required property and used by Olric. 
It has to be a full path.\n#  path: \"/home/burak/go/src/github.com/olric-data/olric-consul-plugin/consul.so\"\n#\n#  # provider is just informal,\n#  provider: \"consul\"\n#\n#  # Plugin specific configuration\n#  # Consul server, used by the plugin. It's required\n#  address: \"http://127.0.0.1:8500\"\n#\n#  # Specifies that the server should return only nodes with all checks in the passing state.\n#  passingOnly: true\n#\n#  # Missing health checks from the request will be deleted from the agent. Using this parameter\n#  # allows to idempotently register a service and its checks without having to manually deregister\n#  # checks.\n#  replaceExistingChecks: true\n#\n#  # InsecureSkipVerify controls whether a client verifies the\n#  # server's certificate chain and host name.\n#  # If InsecureSkipVerify is true, TLS accepts any certificate\n#  # presented by the server and any host name in that certificate.\n#  # In this mode, TLS is susceptible to man-in-the-middle attacks.\n#  # This should be used only for testing.\n#  insecureSkipVerify: true\n#\n#  # service record\n#  payload: '\n#      {\n#          \"Name\": \"olric-cluster\",\n#          \"ID\": \"olric-node-1\",\n#          \"Tags\": [\n#            \"primary\",\n#            \"v1\"\n#          ],\n#          \"Address\": \"localhost\",\n#          \"Port\": 3322,\n#          \"EnableTagOverride\": false,\n#          \"check\": {\n#            \"name\": \"Olric node on 3322\",\n#            \"tcp\": \"0.0.0.0:3322\",\n#            \"interval\": \"10s\",\n#            \"timeout\": \"1s\"\n#          }\n#      }\n#'\n#\n#\n#serviceDiscovery:\n#  provider: \"k8s\"\n#  path: \"/Users/buraksezer/go/src/github.com/olric-data/olric-cloud-plugin/olric-cloud-plugin.so\"\n#  args: 'label_selector=\"app = olric-server\"'\n"
  },
  {
    "path": "olric.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n/*\nPackage olric provides a distributed cache and in-memory key/value data store.\nIt can be used both as an embedded Go library and as a language-independent\nservice.\n\nWith Olric, you can instantly create a fast, scalable, shared pool of RAM across\na cluster of computers.\n\nOlric is designed to be a distributed cache. But it also provides Publish/Subscribe,\ndata replication, failure detection and simple anti-entropy services.\nSo it can be used as an ordinary key/value data store to scale your cloud\napplication.\n*/\npackage olric\n\nimport 
(\n\t\"context\"\n\t\"fmt\"\n\t\"net\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/hashicorp/logutils\"\n\t\"github.com/olric-data/olric/config\"\n\t\"github.com/olric-data/olric/hasher\"\n\t\"github.com/olric-data/olric/internal/checkpoint\"\n\t\"github.com/olric-data/olric/internal/cluster/balancer\"\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/cluster/routingtable\"\n\t\"github.com/olric-data/olric/internal/dmap\"\n\t\"github.com/olric-data/olric/internal/environment\"\n\t\"github.com/olric-data/olric/internal/locker\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/olric-data/olric/internal/pubsub\"\n\t\"github.com/olric-data/olric/internal/server\"\n\t\"github.com/olric-data/olric/pkg/flog\"\n\t\"github.com/pkg/errors\"\n\t\"github.com/tidwall/redcon\"\n\t\"golang.org/x/sync/errgroup\"\n)\n\n// ReleaseVersion is the current stable version of Olric\nconst ReleaseVersion string = \"0.7.3\"\n\nvar (\n\t// ErrOperationTimeout is returned when an operation times out.\n\tErrOperationTimeout = errors.New(\"operation timeout\")\n\n\t// ErrServerGone means that a cluster member is closed unexpectedly.\n\tErrServerGone = errors.New(\"server is gone\")\n\n\t// ErrKeyNotFound means that returned when a key could not be found.\n\tErrKeyNotFound = errors.New(\"key not found\")\n\n\t// ErrKeyFound means that the requested key found in the cluster.\n\tErrKeyFound = errors.New(\"key found\")\n\n\t// ErrWriteQuorum means that write quorum cannot be reached to operate.\n\tErrWriteQuorum = errors.New(\"write quorum cannot be reached\")\n\n\t// ErrReadQuorum means that read quorum cannot be reached to operate.\n\tErrReadQuorum = errors.New(\"read quorum cannot be reached\")\n\n\t// ErrLockNotAcquired is returned when the requested lock could not be acquired\n\tErrLockNotAcquired = errors.New(\"lock not acquired\")\n\n\t// ErrNoSuchLock is returned when the 
requested lock does not exist\n\tErrNoSuchLock = errors.New(\"no such lock\")\n\n\t// ErrClusterQuorum means that the cluster could not reach a healthy numbers of members to operate.\n\tErrClusterQuorum = errors.New(\"failed to find enough peers to create quorum\")\n\n\t// ErrKeyTooLarge means that the given key is too large to process.\n\t// The maximum length of a key is 256 bytes.\n\tErrKeyTooLarge = errors.New(\"key too large\")\n\n\t// ErrEntryTooLarge returned if the required space for an entry is bigger than table size.\n\tErrEntryTooLarge = errors.New(\"entry too large for the configured table size\")\n\n\t// ErrConnRefused returned if the target node refused a connection request.\n\t// It is good to call RefreshMetadata to update the underlying data structures.\n\tErrConnRefused = errors.New(\"connection refused\")\n\n\t// ErrWrongPass indicates that the provided password is incorrect during authentication.\n\tErrWrongPass = errors.New(\"wrong password\")\n)\n\n// Olric implements a distributed cache and in-memory key/value data store.\n// It can be used both as an embedded Go library and as a language-independent\n// service.\ntype Olric struct {\n\t// name is BindAddr:BindPort. It defines servers unique name in the cluster.\n\tname     string\n\tenv      *environment.Environment\n\tconfig   *config.Config\n\tlog      *flog.Logger\n\thashFunc hasher.Hasher\n\n\t// Logical units to store data\n\tprimary *partitions.Partitions\n\tbackup  *partitions.Partitions\n\n\t// RESP server and clients.\n\tserver *server.Server\n\tclient *server.Client\n\n\trt       *routingtable.RoutingTable\n\tbalancer *balancer.Balancer\n\n\tpubsub *pubsub.Service\n\tdmap   *dmap.Service\n\n\t// Structures for flow control\n\tctx    context.Context\n\tcancel context.CancelFunc\n\twg     sync.WaitGroup\n\n\t// Callback function. 
Olric calls this after\n\t// the server is ready to accept new connections.\n\tstarted func()\n}\n\nfunc prepareConfig(c *config.Config) (*config.Config, error) {\n\tif c == nil {\n\t\treturn nil, fmt.Errorf(\"config cannot be nil\")\n\t}\n\n\terr := c.Sanitize()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = c.Validate()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = c.SetupNetworkConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.MemberlistConfig.Name = net.JoinHostPort(c.BindAddr,\n\t\tstrconv.Itoa(c.BindPort))\n\n\tfilter := &logutils.LevelFilter{\n\t\tLevels:   []logutils.LogLevel{\"DEBUG\", \"WARN\", \"ERROR\", \"INFO\"},\n\t\tMinLevel: logutils.LogLevel(strings.ToUpper(c.LogLevel)),\n\t\tWriter:   c.Logger.Writer(),\n\t}\n\tc.Logger.SetOutput(filter)\n\n\treturn c, nil\n}\n\nfunc initializeServices(db *Olric) error {\n\tdb.rt = routingtable.New(db.env)\n\tdb.env.Set(\"routingtable\", db.rt)\n\n\tdb.balancer = balancer.New(db.env)\n\n\t// Add Services\n\tdt, err := pubsub.NewService(db.env)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdb.pubsub = dt.(*pubsub.Service)\n\n\tdm, err := dmap.NewService(db.env)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdb.dmap = dm.(*dmap.Service)\n\n\treturn nil\n}\n\n// New creates a new Olric instance, otherwise returns an error.\nfunc New(c *config.Config) (*Olric, error) {\n\tvar err error\n\tc, err = prepareConfig(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\te := environment.New()\n\te.Set(\"config\", c)\n\n\t// Set the hash function. 
Olric distributes keys over partitions by hashing.\n\tpartitions.SetHashFunc(c.Hasher)\n\n\tflogger := flog.New(c.Logger)\n\tflogger.SetLevel(c.LogVerbosity)\n\tif c.LogLevel == \"DEBUG\" {\n\t\tflogger.ShowLineNumber(1)\n\t}\n\te.Set(\"logger\", flogger)\n\n\tif c.Authentication.Enabled() {\n\t\tc.Client.Authentication = c.Authentication\n\t}\n\tclient := server.NewClient(c.Client)\n\te.Set(\"client\", client)\n\te.Set(\"primary\", partitions.New(c.PartitionCount, partitions.PRIMARY))\n\te.Set(\"backup\", partitions.New(c.PartitionCount, partitions.BACKUP))\n\te.Set(\"locker\", locker.New())\n\tctx, cancel := context.WithCancel(context.Background())\n\tdb := &Olric{\n\t\tname:     c.MemberlistConfig.Name,\n\t\tenv:      e,\n\t\tlog:      flogger,\n\t\tconfig:   c,\n\t\thashFunc: c.Hasher,\n\t\tclient:   client,\n\t\tprimary:  e.Get(\"primary\").(*partitions.Partitions),\n\t\tbackup:   e.Get(\"backup\").(*partitions.Partitions),\n\t\tstarted:  c.Started,\n\t\tctx:      ctx,\n\t\tcancel:   cancel,\n\t}\n\n\t// Create a Redcon server instance\n\trc := &server.Config{\n\t\tBindAddr:        c.BindAddr,\n\t\tBindPort:        c.BindPort,\n\t\tKeepAlivePeriod: c.KeepAlivePeriod,\n\t\tRequireAuth:     c.Authentication.Enabled(),\n\t}\n\tsrv := server.New(rc, flogger)\n\tsrv.SetPreConditionFunc(db.preconditionFunc)\n\n\tdb.server = srv\n\te.Set(\"server\", srv)\n\n\terr = initializeServices(db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdb.registerCommandHandlers()\n\tregisterErrors()\n\n\treturn db, nil\n}\n\nfunc (db *Olric) preconditionFunc(conn redcon.Conn, _ redcon.Command) bool {\n\terr := db.isOperable()\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc (db *Olric) registerCommandHandlers() {\n\tdb.server.ServeMux().HandleFunc(protocol.Generic.Ping, db.pingCommandHandler)\n\tdb.server.ServeMux().HandleFunc(protocol.Cluster.RoutingTable, 
db.clusterRoutingTableCommandHandler)\n\tdb.server.ServeMux().HandleFunc(protocol.Generic.Stats, db.statsCommandHandler)\n\tdb.server.ServeMux().HandleFunc(protocol.Cluster.Members, db.clusterMembersCommandHandler)\n\tdb.server.ServeMux().HandleFunc(protocol.Generic.Auth, db.authCommandHandler)\n}\n\n// callStartedCallback checks passed checkpoint count and calls the callback\n// function.\nfunc (db *Olric) callStartedCallback() {\n\tdefer db.wg.Done()\n\n\ttimer := time.NewTimer(10 * time.Millisecond)\n\tdefer timer.Stop()\n\n\tfor {\n\t\ttimer.Reset(10 * time.Millisecond)\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\tif checkpoint.AllPassed() {\n\t\t\t\tif db.started != nil {\n\t\t\t\t\tdb.started()\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-db.ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc convertClusterError(err error) error {\n\tswitch {\n\tcase errors.Is(err, routingtable.ErrClusterQuorum):\n\t\treturn ErrClusterQuorum\n\tcase errors.Is(err, routingtable.ErrServerGone):\n\t\treturn ErrServerGone\n\tcase errors.Is(err, routingtable.ErrOperationTimeout):\n\t\treturn ErrOperationTimeout\n\tdefault:\n\t\treturn err\n\t}\n}\n\n// isOperable controls bootstrapping status and cluster quorum to prevent split-brain syndrome.\nfunc (db *Olric) isOperable() error {\n\tif err := db.rt.CheckMemberCountQuorum(); err != nil {\n\t\treturn convertClusterError(err)\n\t}\n\t// An Olric node has to be bootstrapped to function properly.\n\treturn db.rt.CheckBootstrap()\n}\n\n// Start starts background servers and joins the cluster. 
You still must call Shutdown\n// method if Start function returns an early error.\nfunc (db *Olric) Start() error {\n\tdb.log.V(1).Printf(\"[INFO] Olric %s on %s/%s %s\", ReleaseVersion, runtime.GOOS, runtime.GOARCH, runtime.Version())\n\n\t// This error group is responsible to run the TCP server at background and report errors.\n\terrGr, ctx := errgroup.WithContext(context.Background())\n\terrGr.Go(func() error {\n\t\treturn db.server.ListenAndServe()\n\t})\n\n\tselect {\n\tcase <-db.server.StartedCtx.Done():\n\t\t// TCP server has been started\n\tcase <-ctx.Done():\n\t\t// TCP server could not be started due to an error. There is no need to run\n\t\t// Olric.Shutdown here because we could not start anything.\n\t\treturn errGr.Wait()\n\t}\n\n\t// Balancer works periodically to balance partition data across the cluster.\n\tif err := db.balancer.Start(); err != nil {\n\t\tif err != nil {\n\t\t\tdb.log.V(2).Printf(\"[ERROR] Failed to run the balancer subsystem: %v\", err)\n\t\t}\n\t\treturn err\n\t}\n\n\t// First, we need to join the cluster. 
Then, the routing table has been started.\n\tif err := db.rt.Join(); err != nil {\n\t\tif err != nil {\n\t\t\tdb.log.V(2).Printf(\"[ERROR] Failed to join the Olric cluster: %v\", err)\n\t\t}\n\t\treturn err\n\t}\n\t// Start routing table service and member discovery subsystem.\n\tif err := db.rt.Start(); err != nil {\n\t\tif err != nil {\n\t\t\tdb.log.V(2).Printf(\"[ERROR] Failed to run the routing table subsystem: %v\", err)\n\t\t}\n\t\treturn err\n\t}\n\n\t// Start publish-subscribe service\n\tif err := db.pubsub.Start(); err != nil {\n\t\tif err != nil {\n\t\t\tdb.log.V(2).Printf(\"[ERROR] Failed to run the Publish-Subscribe service: %v\", err)\n\t\t}\n\t\treturn err\n\t}\n\n\t// Start distributed map service\n\tif err := db.dmap.Start(); err != nil {\n\t\tif err != nil {\n\t\t\tdb.log.V(2).Printf(\"[ERROR] Failed to run the Distributed Map service: %v\", err)\n\t\t}\n\t\treturn err\n\t}\n\n\t// Warn the user about his/her choice of configuration\n\tif db.config.ReplicationMode == config.AsyncReplicationMode && db.config.WriteQuorum > 1 {\n\t\tdb.log.V(2).\n\t\t\tPrintf(\"[WARN] Olric is running in async replication mode. 
WriteQuorum (%d) is ineffective\",\n\t\t\t\tdb.config.WriteQuorum)\n\t}\n\n\tif db.started != nil {\n\t\tdb.wg.Add(1)\n\t\tgo db.callStartedCallback()\n\t}\n\n\tdb.log.V(2).Printf(\"[INFO] Node name in the cluster: %s\",\n\t\tdb.name)\n\tif db.config.Interface != \"\" {\n\t\tdb.log.V(2).Printf(\"[INFO] Olric uses interface: %s\",\n\t\t\tdb.config.Interface)\n\t}\n\tdb.log.V(2).Printf(\"[INFO] Olric bindAddr: %s, bindPort: %d\",\n\t\tdb.config.BindAddr, db.config.BindPort)\n\tdb.log.V(2).Printf(\"[INFO] Replication count is %d\", db.config.ReplicaCount)\n\n\t// Wait for the TCP server.\n\treturn errGr.Wait()\n}\n\n// Shutdown stops background servers and leaves the cluster.\nfunc (db *Olric) Shutdown(ctx context.Context) error {\n\tselect {\n\tcase <-db.ctx.Done():\n\t\t// Shutdown only once.\n\t\treturn nil\n\tdefault:\n\t}\n\n\tdb.cancel()\n\n\tvar latestError error\n\n\tif err := db.pubsub.Shutdown(ctx); err != nil {\n\t\tdb.log.V(2).Printf(\"[ERROR] Failed to shutdown PubSub service: %v\", err)\n\t\tlatestError = err\n\t}\n\n\tif err := db.dmap.Shutdown(ctx); err != nil {\n\t\tdb.log.V(2).Printf(\"[ERROR] Failed to shutdown DMap service: %v\", err)\n\t\tlatestError = err\n\t}\n\n\tif err := db.balancer.Shutdown(ctx); err != nil {\n\t\tdb.log.V(2).Printf(\"[ERROR] Failed to shutdown balancer service: %v\", err)\n\t\tlatestError = err\n\t}\n\n\tif err := db.rt.Shutdown(ctx); err != nil {\n\t\tdb.log.V(2).Printf(\"[ERROR] Failed to shutdown routing table service: %v\", err)\n\t\tlatestError = err\n\t}\n\n\t// Shutdown Redcon server\n\tif err := db.server.Shutdown(ctx); err != nil {\n\t\tdb.log.V(2).Printf(\"[ERROR] Failed to shutdown RESP server: %v\", err)\n\t\tlatestError = err\n\t}\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tdefer func() {\n\t\t\tclose(done)\n\t\t}()\n\t\tdb.wg.Wait()\n\t}()\n\n\tselect {\n\tcase <-ctx.Done():\n\tcase <-done:\n\t}\n\n\t// db.name will be shown as empty string, if the program is killed before\n\t// 
bootstrapping.\n\tdb.log.V(2).Printf(\"[INFO] %s is gone\", db.name)\n\treturn latestError\n}\n\nfunc convertDMapError(err error) error {\n\tswitch {\n\tcase errors.Is(err, dmap.ErrKeyFound):\n\t\treturn ErrKeyFound\n\tcase errors.Is(err, dmap.ErrKeyNotFound):\n\t\treturn ErrKeyNotFound\n\tcase errors.Is(err, dmap.ErrDMapNotFound):\n\t\treturn ErrKeyNotFound\n\tcase errors.Is(err, dmap.ErrLockNotAcquired):\n\t\treturn ErrLockNotAcquired\n\tcase errors.Is(err, dmap.ErrNoSuchLock):\n\t\treturn ErrNoSuchLock\n\tcase errors.Is(err, dmap.ErrReadQuorum):\n\t\treturn ErrReadQuorum\n\tcase errors.Is(err, dmap.ErrWriteQuorum):\n\t\treturn ErrWriteQuorum\n\tcase errors.Is(err, dmap.ErrServerGone):\n\t\treturn ErrServerGone\n\tcase errors.Is(err, dmap.ErrKeyTooLarge):\n\t\treturn ErrKeyTooLarge\n\tcase errors.Is(err, dmap.ErrEntryTooLarge):\n\t\treturn ErrEntryTooLarge\n\tdefault:\n\t\treturn convertClusterError(err)\n\t}\n}\n\n// registerErrors registers application-specific errors with their corresponding prefixes in the error management system.\nfunc registerErrors() {\n\tprotocol.SetError(\"WRONGPASS\", ErrWrongPass)\n}\n"
  },
  {
    "path": "olric_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage olric\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/hashicorp/memberlist\"\n\t\"github.com/olric-data/olric/config\"\n\t\"github.com/olric-data/olric/internal/testutil\"\n\t\"github.com/olric-data/olric/stats\"\n\t\"github.com/stretchr/testify/require\"\n)\n\n// newTestOlricWithConfig creates a new Olric instance with the given configuration.\n// This function is intended for internal use. 
Please use testOlricCluster and its\n// methods to form a cluster in tests.\nfunc newTestOlricWithConfig(t *testing.T, c *config.Config) *Olric {\n\tport, err := testutil.GetFreePort()\n\trequire.NoError(t, err)\n\n\tif c.MemberlistConfig == nil {\n\t\tc.MemberlistConfig = memberlist.DefaultLocalConfig()\n\t}\n\tc.MemberlistConfig.BindPort = 0\n\n\tc.BindAddr = \"127.0.0.1\"\n\tc.BindPort = port\n\n\terr = c.Sanitize()\n\trequire.NoError(t, err)\n\n\terr = c.Validate()\n\trequire.NoError(t, err)\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tc.Started = func() {\n\t\tcancel()\n\t}\n\n\tdb, err := New(c)\n\trequire.NoError(t, err)\n\n\tgo func() {\n\t\tif err := db.Start(); err != nil {\n\t\t\tpanic(fmt.Sprintf(\"Failed to run Olric: %v\", err))\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-time.After(time.Second):\n\t\tt.Fatalf(\"Olric cannot be started in one second\")\n\tcase <-ctx.Done():\n\t\t// everything is fine\n\t}\n\n\treturn db\n}\n\ntype testOlricCluster struct {\n\tmtx     sync.Mutex\n\tmembers map[string]*Olric\n}\n\nfunc newTestOlricCluster(t *testing.T) *testOlricCluster {\n\tcl := &testOlricCluster{members: make(map[string]*Olric)}\n\tt.Cleanup(func() {\n\t\tcl.mtx.Lock()\n\t\tdefer cl.mtx.Unlock()\n\t\tfor _, member := range cl.members {\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\t\t\terr := member.Shutdown(ctx)\n\t\t\tcancel()\n\t\t\trequire.NoError(t, err)\n\t\t}\n\t})\n\treturn cl\n}\n\nfunc (cl *testOlricCluster) addMemberWithConfig(t *testing.T, c *config.Config) *Olric {\n\tcl.mtx.Lock()\n\tdefer cl.mtx.Unlock()\n\n\tif c == nil {\n\t\tc = testutil.NewConfig()\n\t}\n\n\tfor _, member := range cl.members {\n\t\tc.Peers = append(c.Peers, member.rt.Discovery().LocalNode().Address())\n\t}\n\n\tdb := newTestOlricWithConfig(t, c)\n\tcl.members[db.rt.This().String()] = db\n\tt.Logf(\"A new cluster member has been created: %s\", db.rt.This())\n\treturn db\n}\n\nfunc (cl *testOlricCluster) addMember(t 
*testing.T) *Olric {\n\treturn cl.addMemberWithConfig(t, nil)\n}\n\nfunc TestOlric_StartAndShutdown(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\terr := db.Shutdown(context.Background())\n\trequire.NoError(t, err)\n}\n\nfunc TestOlricCluster_StartAndShutdown(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tcluster.addMember(t)\n\tdb := cluster.addMember(t)\n\trequire.Len(t, cluster.members, 2)\n\n\te := db.NewEmbeddedClient()\n\tst, err := e.Stats(context.Background(), db.rt.This().String())\n\trequire.NoError(t, err)\n\trequire.Len(t, st.ClusterMembers, 2)\n\tfor _, member := range cluster.members {\n\t\trequire.Contains(t, st.ClusterMembers, stats.MemberID(member.rt.This().ID))\n\t}\n}\n"
  },
  {
    "path": "ping.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage olric\n\nimport (\n\t\"context\"\n\t\"strings\"\n\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/tidwall/redcon\"\n)\n\nconst DefaultPingResponse = \"PONG\"\n\nfunc (db *Olric) ping(ctx context.Context, addr, message string) ([]byte, error) {\n\tmessage = strings.TrimSpace(message)\n\n\tpingCmd := protocol.NewPing()\n\tif message != \"\" {\n\t\tpingCmd = pingCmd.SetMessage(message)\n\t}\n\n\tcmd := pingCmd.Command(ctx)\n\trc := db.client.Get(addr)\n\terr := rc.Process(ctx, cmd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cmd.Bytes()\n}\n\nfunc (db *Olric) pingCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tpingCmd, err := protocol.ParsePingCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tif pingCmd.Message != \"\" {\n\t\tconn.WriteString(pingCmd.Message)\n\t\treturn\n\t}\n\tconn.WriteString(DefaultPingResponse)\n}\n"
  },
  {
    "path": "ping_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage olric\n\nimport (\n\t\"context\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestOlric_Ping(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tresult, err := db.ping(context.Background(), db.rt.This().String(), \"\")\n\trequire.NoError(t, err)\n\trequire.Equal(t, []byte(DefaultPingResponse), result)\n}\n\nfunc TestOlric_PingWithMessage(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tmsg := \"Olric rocks!\"\n\tresponse, err := db.ping(context.Background(), db.rt.This().String(), msg)\n\trequire.NoError(t, err)\n\trequire.Equal(t, []byte(msg), response)\n}\n"
  },
  {
    "path": "pipeline.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage olric\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/dmap\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/olric-data/olric/internal/resp\"\n\t\"github.com/redis/go-redis/v9\"\n\t\"golang.org/x/sync/errgroup\"\n\t\"golang.org/x/sync/semaphore\"\n)\n\nvar (\n\t// ErrNotReady denotes that the Future instance you hold is not ready to read the response yet.\n\tErrNotReady = errors.New(\"not ready yet\")\n\n\t// ErrPipelineClosed denotes that the underlying pipeline is closed, and it's impossible to operate.\n\tErrPipelineClosed = errors.New(\"pipeline is closed\")\n\n\t// ErrPipelineExecuted denotes that Exec was already called on the underlying pipeline.\n\tErrPipelineExecuted = errors.New(\"pipeline already executed\")\n)\n\n// DMapPipeline implements a pipeline for the following methods of the DMap API:\n//\n// * Put\n// * Get\n// * Delete\n// * Incr\n// * Decr\n// * GetPut\n// * IncrByFloat\n//\n// DMapPipeline enables batch operations on DMap data.\ntype DMapPipeline struct {\n\tmtx          sync.Mutex\n\tdm           *ClusterDMap\n\tcommands     map[uint64][]redis.Cmder\n\tresult       map[uint64][]redis.Cmder\n\tctx          
context.Context\n\tcancel       context.CancelFunc\n\tclosedCtx    context.Context // used to detect if the pipeline is closed / discarded\n\tclosedCancel context.CancelFunc\n\n\tconcurrency int // defaults to runtime.NumCPU()\n}\n\nfunc (dp *DMapPipeline) addCommand(key string, cmd redis.Cmder) (uint64, int) {\n\tdp.mtx.Lock()\n\tdefer dp.mtx.Unlock()\n\n\thkey := partitions.HKey(dp.dm.name, key)\n\tpartID := hkey % dp.dm.clusterClient.partitionCount\n\n\tcmds, ok := dp.commands[partID]\n\tif !ok {\n\t\t// if there are no existing commands, get a new slice from the pool\n\t\tcmds = getPipelineCmdsFromPool()\n\t}\n\tdp.commands[partID] = append(cmds, cmd)\n\n\treturn partID, len(dp.commands[partID]) - 1\n}\n\n// FuturePut is used to read the result of a pipelined Put command.\ntype FuturePut struct {\n\tdp        *DMapPipeline\n\tpartID    uint64\n\tindex     int\n\tctx       context.Context\n\tclosedCtx context.Context\n}\n\n// Result returns a response for the pipelined Put command.\nfunc (f *FuturePut) Result() error {\n\t// this select is separate from the one below on purpose, since select is non-deterministic if multiple\n\t// cases are available, and we need to guarantee this check first.\n\tselect {\n\tcase <-f.closedCtx.Done():\n\t\treturn ErrPipelineClosed\n\tdefault:\n\t}\n\n\tselect {\n\tcase <-f.ctx.Done():\n\t\tcmd := f.dp.result[f.partID][f.index]\n\t\treturn processProtocolError(cmd.Err())\n\tdefault:\n\t\treturn ErrNotReady\n\t}\n}\n\n// Put queues a Put command. 
The parameters are identical to the DMap.Put,\n// but it returns FuturePut to read the batched response.\nfunc (dp *DMapPipeline) Put(ctx context.Context, key string, value interface{}, options ...PutOption) (*FuturePut, error) {\n\tbuf := bytes.NewBuffer(nil)\n\n\tenc := resp.New(buf)\n\terr := enc.Encode(value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar pc dmap.PutConfig\n\tfor _, opt := range options {\n\t\topt(&pc)\n\t}\n\n\tcmd := dp.dm.writePutCommand(&pc, key, buf.Bytes()).Command(ctx)\n\tpartID, index := dp.addCommand(key, cmd)\n\treturn &FuturePut{\n\t\tdp:        dp,\n\t\tpartID:    partID,\n\t\tindex:     index,\n\t\tctx:       dp.ctx,\n\t\tclosedCtx: dp.closedCtx,\n\t}, nil\n}\n\n// FutureGet is used to read result of a pipelined Get command.\ntype FutureGet struct {\n\tdp        *DMapPipeline\n\tpartID    uint64\n\tindex     int\n\tctx       context.Context\n\tclosedCtx context.Context\n}\n\n// Result returns a response for the pipelined Get command.\nfunc (f *FutureGet) Result() (*GetResponse, error) {\n\t// this select is separate from the one below on purpose, since select is non-deterministic if multiple\n\t// cases are available, and we need to guarantee this check first.\n\tselect {\n\tcase <-f.closedCtx.Done():\n\t\treturn nil, ErrPipelineClosed\n\tdefault:\n\t}\n\n\tselect {\n\tcase <-f.ctx.Done():\n\t\tcmd := f.dp.result[f.partID][f.index]\n\t\tif cmd.Err() != nil {\n\t\t\treturn nil, processProtocolError(cmd.Err())\n\t\t}\n\t\tstringCmd := redis.NewStringCmd(context.Background(), cmd.Args()...)\n\t\tstringCmd.SetVal(cmd.(*redis.Cmd).Val().(string))\n\t\treturn f.dp.dm.makeGetResponse(stringCmd)\n\tdefault:\n\t\treturn nil, ErrNotReady\n\t}\n}\n\n// Get queues a Get command. 
The parameters are identical to the DMap.Get,\n// but it returns FutureGet to read the batched response.\nfunc (dp *DMapPipeline) Get(ctx context.Context, key string) *FutureGet {\n\tcmd := protocol.NewGet(dp.dm.name, key).SetRaw().Command(ctx)\n\tpartID, index := dp.addCommand(key, cmd)\n\treturn &FutureGet{\n\t\tdp:        dp,\n\t\tpartID:    partID,\n\t\tindex:     index,\n\t\tctx:       dp.ctx,\n\t\tclosedCtx: dp.closedCtx,\n\t}\n}\n\n// FutureDelete is used to read the result of a pipelined Delete command.\ntype FutureDelete struct {\n\tdp        *DMapPipeline\n\tpartID    uint64\n\tindex     int\n\tctx       context.Context\n\tclosedCtx context.Context\n}\n\n// Result returns a response for the pipelined Delete command.\nfunc (f *FutureDelete) Result() (int, error) {\n\t// this select is separate from the one below on purpose, since select is non-deterministic if multiple\n\t// cases are available, and we need to guarantee this check first.\n\tselect {\n\tcase <-f.closedCtx.Done():\n\t\treturn 0, ErrPipelineClosed\n\tdefault:\n\t}\n\n\tselect {\n\tcase <-f.ctx.Done():\n\t\tcmd := f.dp.result[f.partID][f.index]\n\t\tif cmd.Err() != nil {\n\t\t\treturn 0, processProtocolError(cmd.Err())\n\t\t}\n\t\treturn int(cmd.(*redis.Cmd).Val().(int64)), nil\n\tdefault:\n\t\treturn 0, ErrNotReady\n\t}\n}\n\n// Delete queues a Delete command. 
The parameters are identical to the DMap.Delete,\n// but it returns FutureDelete to read the batched response.\nfunc (dp *DMapPipeline) Delete(ctx context.Context, key string) *FutureDelete {\n\tcmd := protocol.NewDel(dp.dm.name, []string{key}...).Command(ctx)\n\tpartID, index := dp.addCommand(key, cmd)\n\treturn &FutureDelete{\n\t\tdp:        dp,\n\t\tpartID:    partID,\n\t\tindex:     index,\n\t\tctx:       dp.ctx,\n\t\tclosedCtx: dp.closedCtx,\n\t}\n}\n\n// FutureExpire is used to read the result of a pipelined Expire command.\ntype FutureExpire struct {\n\tdp        *DMapPipeline\n\tpartID    uint64\n\tindex     int\n\tctx       context.Context\n\tclosedCtx context.Context\n}\n\n// Result returns a response for the pipelined Expire command.\nfunc (f *FutureExpire) Result() error {\n\t// this select is separate from the one below on purpose, since select is non-deterministic if multiple\n\t// cases are available, and we need to guarantee this check first.\n\tselect {\n\tcase <-f.closedCtx.Done():\n\t\treturn ErrPipelineClosed\n\tdefault:\n\t}\n\n\tselect {\n\tcase <-f.ctx.Done():\n\t\tcmd := f.dp.result[f.partID][f.index]\n\t\treturn processProtocolError(cmd.Err())\n\tdefault:\n\t\treturn ErrNotReady\n\t}\n}\n\n// Expire queues an Expire command. 
The parameters are identical to the DMap.Expire,\n// but it returns FutureExpire to read the batched response.\nfunc (dp *DMapPipeline) Expire(ctx context.Context, key string, timeout time.Duration) (*FutureExpire, error) {\n\tcmd := protocol.NewExpire(dp.dm.name, key, timeout).Command(ctx)\n\tpartID, index := dp.addCommand(key, cmd)\n\treturn &FutureExpire{\n\t\tdp:        dp,\n\t\tpartID:    partID,\n\t\tindex:     index,\n\t\tctx:       dp.ctx,\n\t\tclosedCtx: dp.closedCtx,\n\t}, nil\n}\n\n// FutureIncr is used to read the result of a pipelined Incr command.\ntype FutureIncr struct {\n\tdp        *DMapPipeline\n\tpartID    uint64\n\tindex     int\n\tctx       context.Context\n\tclosedCtx context.Context\n}\n\n// Result returns a response for the pipelined Incr command.\nfunc (f *FutureIncr) Result() (int, error) {\n\t// this select is separate from the one below on purpose, since select is non-deterministic if multiple\n\t// cases are available, and we need to guarantee this check first.\n\tselect {\n\tcase <-f.closedCtx.Done():\n\t\treturn 0, ErrPipelineClosed\n\tdefault:\n\t}\n\n\tselect {\n\tcase <-f.ctx.Done():\n\t\tcmd := f.dp.result[f.partID][f.index]\n\t\tif cmd.Err() != nil {\n\t\t\treturn 0, processProtocolError(cmd.Err())\n\t\t}\n\t\treturn int(cmd.(*redis.Cmd).Val().(int64)), nil\n\tdefault:\n\t\treturn 0, ErrNotReady\n\t}\n}\n\n// Incr queues an Incr command. 
The parameters are identical to the DMap.Incr,\n// but it returns FutureIncr to read the batched response.\nfunc (dp *DMapPipeline) Incr(ctx context.Context, key string, delta int) (*FutureIncr, error) {\n\tcmd := protocol.NewIncr(dp.dm.name, key, delta).Command(ctx)\n\tpartID, index := dp.addCommand(key, cmd)\n\treturn &FutureIncr{\n\t\tdp:        dp,\n\t\tpartID:    partID,\n\t\tindex:     index,\n\t\tctx:       dp.ctx,\n\t\tclosedCtx: dp.closedCtx,\n\t}, nil\n}\n\n// FutureDecr is used to read the result of a pipelined Decr command.\ntype FutureDecr struct {\n\tdp        *DMapPipeline\n\tpartID    uint64\n\tindex     int\n\tctx       context.Context\n\tclosedCtx context.Context\n}\n\n// Result returns a response for the pipelined Decr command.\nfunc (f *FutureDecr) Result() (int, error) {\n\t// this select is separate from the one below on purpose, since select is non-deterministic if multiple\n\t// cases are available, and we need to guarantee this check first.\n\tselect {\n\tcase <-f.closedCtx.Done():\n\t\treturn 0, ErrPipelineClosed\n\tdefault:\n\t}\n\n\tselect {\n\tcase <-f.ctx.Done():\n\t\tcmd := f.dp.result[f.partID][f.index]\n\t\tif cmd.Err() != nil {\n\t\t\treturn 0, processProtocolError(cmd.Err())\n\t\t}\n\t\treturn int(cmd.(*redis.Cmd).Val().(int64)), nil\n\tdefault:\n\t\treturn 0, ErrNotReady\n\t}\n}\n\n// Decr queues a Decr command. 
The parameters are identical to the DMap.Decr,\n// but it returns FutureDecr to read the batched response.\nfunc (dp *DMapPipeline) Decr(ctx context.Context, key string, delta int) (*FutureDecr, error) {\n\tcmd := protocol.NewDecr(dp.dm.name, key, delta).Command(ctx)\n\tpartID, index := dp.addCommand(key, cmd)\n\treturn &FutureDecr{\n\t\tdp:        dp,\n\t\tpartID:    partID,\n\t\tindex:     index,\n\t\tctx:       dp.ctx,\n\t\tclosedCtx: dp.closedCtx,\n\t}, nil\n}\n\n// FutureGetPut is used to read the result of a pipelined GetPut command.\ntype FutureGetPut struct {\n\tdp        *DMapPipeline\n\tpartID    uint64\n\tindex     int\n\tctx       context.Context\n\tclosedCtx context.Context\n}\n\n// Result returns a response for the pipelined GetPut command.\nfunc (f *FutureGetPut) Result() (*GetResponse, error) {\n\t// this select is separate from the one below on purpose, since select is non-deterministic if multiple\n\t// cases are available, and we need to guarantee this check first.\n\tselect {\n\tcase <-f.closedCtx.Done():\n\t\treturn nil, ErrPipelineClosed\n\tdefault:\n\t}\n\n\tselect {\n\tcase <-f.ctx.Done():\n\t\tcmd := f.dp.result[f.partID][f.index]\n\t\tif cmd.Err() == redis.Nil {\n\t\t\t// This should be the first run.\n\t\t\treturn nil, nil\n\t\t}\n\t\tif cmd.Err() != nil {\n\t\t\treturn nil, processProtocolError(cmd.Err())\n\t\t}\n\t\tstringCmd := redis.NewStringCmd(context.Background(), cmd.Args()...)\n\t\tstringCmd.SetVal(cmd.(*redis.Cmd).Val().(string))\n\t\treturn f.dp.dm.makeGetResponse(stringCmd)\n\tdefault:\n\t\treturn nil, ErrNotReady\n\t}\n}\n\n// GetPut queues a GetPut command. 
The parameters are identical to the DMap.GetPut,\n// but it returns FutureGetPut to read the batched response.\nfunc (dp *DMapPipeline) GetPut(ctx context.Context, key string, value interface{}) (*FutureGetPut, error) {\n\tbuf := bytes.NewBuffer(nil)\n\n\tenc := resp.New(buf)\n\terr := enc.Encode(value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcmd := protocol.NewGetPut(dp.dm.name, key, buf.Bytes()).SetRaw().Command(ctx)\n\tpartID, index := dp.addCommand(key, cmd)\n\treturn &FutureGetPut{\n\t\tdp:        dp,\n\t\tpartID:    partID,\n\t\tindex:     index,\n\t\tctx:       dp.ctx,\n\t\tclosedCtx: dp.closedCtx,\n\t}, nil\n}\n\n// FutureIncrByFloat is used to read the result of a pipelined IncrByFloat command.\ntype FutureIncrByFloat struct {\n\tdp        *DMapPipeline\n\tpartID    uint64\n\tindex     int\n\tctx       context.Context\n\tclosedCtx context.Context\n}\n\n// Result returns a response for the pipelined IncrByFloat command.\nfunc (f *FutureIncrByFloat) Result() (float64, error) {\n\t// this select is separate from the one below on purpose, since select is non-deterministic if multiple\n\t// cases are available, and we need to guarantee this check first.\n\tselect {\n\tcase <-f.closedCtx.Done():\n\t\treturn 0, ErrPipelineClosed\n\tdefault:\n\t}\n\n\tselect {\n\tcase <-f.ctx.Done():\n\t\tcmd := f.dp.result[f.partID][f.index]\n\t\tif cmd.Err() != nil {\n\t\t\treturn 0, processProtocolError(cmd.Err())\n\t\t}\n\t\tstringRes := cmd.(*redis.Cmd).Val().(string)\n\t\treturn strconv.ParseFloat(stringRes, 64)\n\tdefault:\n\t\treturn 0, ErrNotReady\n\t}\n}\n\n// IncrByFloat queues an IncrByFloat command. 
The parameters are identical to the DMap.IncrByFloat,\n// but it returns FutureIncrByFloat to read the batched response.\nfunc (dp *DMapPipeline) IncrByFloat(ctx context.Context, key string, delta float64) (*FutureIncrByFloat, error) {\n\tcmd := protocol.NewIncrByFloat(dp.dm.name, key, delta).Command(ctx)\n\tpartID, index := dp.addCommand(key, cmd)\n\treturn &FutureIncrByFloat{\n\t\tdp:        dp,\n\t\tpartID:    partID,\n\t\tindex:     index,\n\t\tctx:       dp.ctx,\n\t\tclosedCtx: dp.closedCtx,\n\t}, nil\n}\n\nfunc (dp *DMapPipeline) execOnPartition(ctx context.Context, partID uint64) error {\n\trc, err := dp.dm.clusterClient.clientByPartID(partID)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// There is no need to protect dp.commands map and its content.\n\t// It's already filled before running Exec, and it's now a read-only\n\t// data structure\n\tcommands := dp.commands[partID]\n\tpipe := rc.Pipeline()\n\n\tfor _, cmd := range commands {\n\t\tpipe.Do(ctx, cmd.Args()...)\n\t}\n\n\t// Exec executes all previously queued commands using one\n\t// client-server roundtrip.\n\t//\n\t// Exec always returns list of commands and error of the first failed\n\t// command if any.\n\tresult, _ := pipe.Exec(ctx)\n\tdp.mtx.Lock()\n\tdp.result[partID] = result\n\tdp.mtx.Unlock()\n\treturn nil\n}\n\n// Exec executes all queued commands using one client-server roundtrip per partition.\nfunc (dp *DMapPipeline) Exec(ctx context.Context) error {\n\t// this select is separate from the one below on purpose, since select is non-deterministic if multiple\n\t// cases are available, and we need to guarantee this check first.\n\tselect {\n\tcase <-dp.closedCtx.Done():\n\t\treturn ErrPipelineClosed\n\tdefault:\n\t}\n\n\t// this checks to see if Exec has already run. While Exec should only be called once, it is possible that\n\t// the user could call Exec multiple times. 
If we stored the result of errGr.Wait on the pipeline, we could\n\t// return that error and make Exec idempotent.\n\tselect {\n\tcase <-dp.ctx.Done():\n\t\treturn ErrPipelineExecuted\n\tdefault:\n\t}\n\n\tdefer dp.cancel()\n\n\tvar errGr errgroup.Group\n\tsem := semaphore.NewWeighted(int64(dp.concurrency))\n\tfor i := uint64(0); i < dp.dm.clusterClient.partitionCount; i++ {\n\t\terr := sem.Acquire(ctx, 1)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tpartID := i\n\t\terrGr.Go(func() error {\n\t\t\tdefer sem.Release(1)\n\t\t\t// If execOnPartition returns an error, it will eventually stop\n\t\t\t// all flush operation.\n\t\t\treturn dp.execOnPartition(ctx, partID)\n\t\t})\n\t}\n\n\treturn errGr.Wait()\n}\n\n// Discard discards the pipelined commands and resets all internal states.\n// A pipeline can be reused after calling Discard.\nfunc (dp *DMapPipeline) Discard() error {\n\tselect {\n\tcase <-dp.closedCtx.Done():\n\t\treturn ErrPipelineClosed\n\tdefault:\n\t}\n\n\tdp.closedCancel()\n\n\tdp.mtx.Lock()\n\tdefer dp.mtx.Unlock()\n\n\t// return all command slices to the pool\n\n\tfor _, v := range dp.commands {\n\t\tputPipelineCmdsIntoPool(v)\n\t}\n\n\tfor _, v := range dp.result {\n\t\tputPipelineCmdsIntoPool(v)\n\t}\n\n\t// the deletes below are purposefully not combined with the loops above, as these are recognized and optimized\n\t// by the compiler. https://go-review.googlesource.com/c/go/+/110055\n\n\tfor k := range dp.commands {\n\t\tdelete(dp.commands, k)\n\t}\n\n\tfor k := range dp.result {\n\t\tdelete(dp.result, k)\n\t}\n\n\tdp.initContexts()\n\n\treturn nil\n}\n\n// Close closes the pipeline and frees the allocated resources. 
You shouldn't try to\n// reuse a closed pipeline.\nfunc (dp *DMapPipeline) Close() {\n\tdp.closedCancel()\n}\n\n// Pipeline is a mechanism to realise Redis Pipeline technique.\n//\n// Pipelining is a technique to extremely speed up processing by packing\n// operations to batches, send them at once to Redis and read the replies in a\n// single step.\n// See https://redis.io/topics/pipelining\n//\n// Pay attention, that Pipeline is not a transaction, so you can get unexpected\n// results in case of big pipelines and small read/write timeouts.\n// Redis client has retransmission logic in case of timeouts, pipeline\n// can be retransmitted and commands can be executed more than once.\nfunc (dm *ClusterDMap) Pipeline(opts ...PipelineOption) (*DMapPipeline, error) {\n\tdp := &DMapPipeline{\n\t\tdm:       dm,\n\t\tcommands: make(map[uint64][]redis.Cmder),\n\t\tresult:   make(map[uint64][]redis.Cmder),\n\n\t\tconcurrency: runtime.NumCPU(),\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(dp)\n\t}\n\n\tdp.initContexts()\n\n\treturn dp, nil\n}\n\n// initContexts sets up chained contexts for the pipeline. The base is closedCtx, which is closed either in\n// Close or Discard. ctx is a child of closedCtx, as we want to cancel the pipeline if it is closed. It is\n// canceled in Exec, and used to block FutureXXX.Result() calls until Exec has completed.\nfunc (dp *DMapPipeline) initContexts() {\n\tdp.closedCtx, dp.closedCancel = context.WithCancel(context.Background())\n\tdp.ctx, dp.cancel = context.WithCancel(dp.closedCtx)\n}\n\n// This stores a slice of commands for each partition. 
There is a possibility that a single\n// large slice could be allocated with an unusually large number of commands in a single pipeline that\n// are very unbalanced across partitions, but that is unlikely to be a problem in practice.\n//\n// It does not store a pointer to the slice as recommended by staticcheck because that is harder to reason\n// about, and a single allocation is not a big deal compared to the slices we're able to reuse.\n// https://staticcheck.io/docs/checks#SA6002\n// https://github.com/dominikh/go-tools/issues/1336#issuecomment-1331206290\nvar pipelineCmdPool = sync.Pool{\n\tNew: func() interface{} {\n\t\treturn make([]redis.Cmder, 0)\n\t},\n}\n\nfunc getPipelineCmdsFromPool() []redis.Cmder {\n\treturn pipelineCmdPool.Get().([]redis.Cmder)\n}\n\nfunc putPipelineCmdsIntoPool(cmds []redis.Cmder) {\n\t// remove references to underlying commands so they can be GCed\n\tfor i := range cmds {\n\t\tcmds[i] = nil\n\t}\n\tcmds = cmds[:0]\n\tpipelineCmdPool.Put(cmds)\n}\n"
  },
  {
    "path": "pipeline_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage olric\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/testutil\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestDMapPipeline_Put(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfutures := make(map[int]*FuturePut)\n\tpipe, err := dm.Pipeline()\n\trequire.NoError(t, err)\n\tdefer pipe.Close()\n\n\tfor i := 0; i < 100; i++ {\n\t\tfp, err := pipe.Put(ctx, testutil.ToKey(i), testutil.ToVal(i))\n\t\trequire.NoError(t, err)\n\t\tfutures[i] = fp\n\t}\n\terr = pipe.Exec(ctx)\n\trequire.NoError(t, err)\n\n\tfor _, fp := range futures {\n\t\trequire.NoError(t, fp.Result())\n\t}\n\n\tfor i := 0; i < 100; i++ {\n\t\tkey := testutil.ToKey(i)\n\t\tgr, err := dm.Get(ctx, key)\n\t\trequire.NoError(t, err)\n\n\t\tvalue, err := gr.Byte()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, testutil.ToVal(i), value)\n\t}\n}\n\nfunc TestDMapPipeline_Get(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := 
NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\tfor i := 0; i < 100; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), testutil.ToVal(i))\n\t\trequire.NoError(t, err)\n\t}\n\n\tpipe, err := dm.Pipeline()\n\trequire.NoError(t, err)\n\tdefer pipe.Close()\n\n\tfutures := make(map[int]*FutureGet)\n\tfor i := 0; i < 100; i++ {\n\t\tfg := pipe.Get(ctx, testutil.ToKey(i))\n\t\tfutures[i] = fg\n\t}\n\n\terr = pipe.Exec(ctx)\n\trequire.NoError(t, err)\n\n\tfor i, fg := range futures {\n\t\tgr, err := fg.Result()\n\t\trequire.NoError(t, err)\n\n\t\tvalue, err := gr.Byte()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, testutil.ToVal(i), value)\n\t}\n}\n\nfunc TestDMapPipeline_Delete(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\tfor i := 0; i < 100; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), testutil.ToVal(i))\n\t\trequire.NoError(t, err)\n\t}\n\n\tpipe, err := dm.Pipeline()\n\trequire.NoError(t, err)\n\tdefer pipe.Close()\n\n\tfutures := make(map[int]*FutureDelete)\n\tfor i := 0; i < 100; i++ {\n\t\tfd := pipe.Delete(ctx, testutil.ToKey(i))\n\t\tfutures[i] = fd\n\t}\n\n\terr = pipe.Exec(ctx)\n\trequire.NoError(t, err)\n\n\tfor _, fd := range futures {\n\t\tnum, err := fd.Result()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, 1, num)\n\t}\n}\n\nfunc TestDMapPipeline_Expire(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := 
c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\tfor i := 0; i < 100; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), testutil.ToVal(i))\n\t\trequire.NoError(t, err)\n\t}\n\n\tpipe, err := dm.Pipeline()\n\trequire.NoError(t, err)\n\tdefer pipe.Close()\n\n\tfutures := make(map[int]*FutureExpire)\n\tfor i := 0; i < 100; i++ {\n\t\tfd, err := pipe.Expire(ctx, testutil.ToKey(i), time.Hour)\n\t\trequire.NoError(t, err)\n\t\tfutures[i] = fd\n\t}\n\n\terr = pipe.Exec(ctx)\n\trequire.NoError(t, err)\n\n\tfor _, fd := range futures {\n\t\terr := fd.Result()\n\t\trequire.NoError(t, err)\n\t}\n\n\tfor i := 0; i < 100; i++ {\n\t\tgr, err := dm.Get(ctx, testutil.ToKey(i))\n\t\trequire.NoError(t, err)\n\t\trequire.NotEqual(t, int64(0), gr.TTL())\n\t}\n}\n\nfunc TestDMapPipeline_Incr(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfutures := make(map[int]*FutureIncr)\n\tpipe, err := dm.Pipeline()\n\trequire.NoError(t, err)\n\tdefer pipe.Close()\n\n\tfor i := 0; i < 100; i++ {\n\t\tfi, err := pipe.Incr(ctx, \"mykey\", 1)\n\t\trequire.NoError(t, err)\n\t\tfutures[i] = fi\n\t}\n\terr = pipe.Exec(ctx)\n\trequire.NoError(t, err)\n\n\tfor i, fp := range futures {\n\t\tnum, err := fp.Result()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, i+1, num)\n\t}\n}\n\nfunc TestDMapPipeline_Decr(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfutures := make(map[int]*FutureDecr)\n\tpipe, err := dm.Pipeline()\n\trequire.NoError(t, err)\n\tdefer 
pipe.Close()\n\n\tfor i := 0; i < 100; i++ {\n\t\tfi, err := pipe.Decr(ctx, \"mykey\", 1)\n\t\trequire.NoError(t, err)\n\t\tfutures[i] = fi\n\t}\n\terr = pipe.Exec(ctx)\n\trequire.NoError(t, err)\n\n\tfor i, fp := range futures {\n\t\tnum, err := fp.Result()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, -1*(i+1), num)\n\t}\n}\n\nfunc TestDMapPipeline_GetPut(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfutures := make(map[int]*FutureGetPut)\n\tpipe, err := dm.Pipeline()\n\trequire.NoError(t, err)\n\tdefer pipe.Close()\n\n\tfor i := 0; i < 100; i++ {\n\t\tfi, err := pipe.GetPut(ctx, \"key\", testutil.ToVal(i))\n\t\trequire.NoError(t, err)\n\t\tfutures[i] = fi\n\t}\n\terr = pipe.Exec(ctx)\n\trequire.NoError(t, err)\n\n\tfor _, fp := range futures {\n\t\tgr, err := fp.Result()\n\t\trequire.NoError(t, err)\n\t\tif gr != nil {\n\t\t\tfmt.Println(gr.String())\n\t\t}\n\t}\n}\n\nfunc TestDMapPipeline_IncrByFloat(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfutures := make(map[int]*FutureIncrByFloat)\n\tpipe, err := dm.Pipeline()\n\trequire.NoError(t, err)\n\tdefer pipe.Close()\n\n\tfor i := 0; i < 100; i++ {\n\t\tfi, err := pipe.IncrByFloat(ctx, \"mykey\", 1.2)\n\t\trequire.NoError(t, err)\n\t\tfutures[i] = fi\n\t}\n\terr = pipe.Exec(ctx)\n\trequire.NoError(t, err)\n\n\tfor _, fp := range futures {\n\t\t_, err := fp.Result()\n\t\trequire.NoError(t, err)\n\t}\n}\n\nfunc TestDMapPipeline_Discard(t *testing.T) {\n\tcluster 
:= newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfutures := make(map[int]*FuturePut)\n\tpipe, err := dm.Pipeline()\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < 100; i++ {\n\t\tfp, err := pipe.Put(ctx, testutil.ToKey(i), testutil.ToVal(i))\n\t\trequire.NoError(t, err)\n\t\tfutures[i] = fp\n\t}\n\n\t// Discard all pipelined DM.PUT requests.\n\terr = pipe.Discard()\n\trequire.NoError(t, err)\n\n\terr = pipe.Exec(ctx)\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < 100; i++ {\n\t\tkey := testutil.ToKey(i)\n\t\t_, err := dm.Get(ctx, key)\n\t\trequire.ErrorIs(t, err, ErrKeyNotFound)\n\t}\n}\n\nfunc TestDMapPipeline_Close(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfutures := make(map[int]*FuturePut)\n\tpipe, err := dm.Pipeline()\n\trequire.NoError(t, err)\n\n\tfor i := 0; i < 100; i++ {\n\t\tfp, err := pipe.Put(ctx, testutil.ToKey(i), testutil.ToVal(i))\n\t\trequire.NoError(t, err)\n\t\tfutures[i] = fp\n\t}\n\n\tpipe.Close()\n\n\terr = pipe.Exec(ctx)\n\trequire.ErrorIs(t, err, ErrPipelineClosed)\n}\n\nfunc TestDMapPipeline_ErrNotReady(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tpipe, err := dm.Pipeline()\n\trequire.NoError(t, err)\n\tdefer pipe.Close()\n\n\tt.Run(\"Put\", 
func(t *testing.T) {\n\t\tfp, err := pipe.Put(ctx, \"key\", \"value\")\n\t\trequire.NoError(t, err)\n\t\trequire.ErrorIs(t, ErrNotReady, fp.Result())\n\t})\n\n\tt.Run(\"Get\", func(t *testing.T) {\n\t\tfp := pipe.Get(ctx, \"key\")\n\t\t_, err := fp.Result()\n\t\trequire.ErrorIs(t, ErrNotReady, err)\n\t})\n\n\tt.Run(\"Delete\", func(t *testing.T) {\n\t\tfp := pipe.Delete(ctx, \"key\")\n\t\t_, err := fp.Result()\n\t\trequire.ErrorIs(t, ErrNotReady, err)\n\t})\n\n\tt.Run(\"Expire\", func(t *testing.T) {\n\t\tfp, err := pipe.Expire(ctx, \"key\", time.Second)\n\t\trequire.NoError(t, err)\n\t\terr = fp.Result()\n\t\trequire.ErrorIs(t, ErrNotReady, err)\n\t})\n\n\tt.Run(\"Incr\", func(t *testing.T) {\n\t\tfp, err := pipe.Incr(ctx, \"key\", 1)\n\t\trequire.NoError(t, err)\n\t\t_, err = fp.Result()\n\t\trequire.ErrorIs(t, ErrNotReady, err)\n\t})\n\n\tt.Run(\"Decr\", func(t *testing.T) {\n\t\tfp, err := pipe.Decr(ctx, \"key\", 1)\n\t\trequire.NoError(t, err)\n\t\t_, err = fp.Result()\n\t\trequire.ErrorIs(t, ErrNotReady, err)\n\t})\n\n\tt.Run(\"GetPut\", func(t *testing.T) {\n\t\tfp, err := pipe.GetPut(ctx, \"key\", \"value\")\n\t\trequire.NoError(t, err)\n\t\t_, err = fp.Result()\n\t\trequire.ErrorIs(t, ErrNotReady, err)\n\t})\n\n\tt.Run(\"IncrByFloat\", func(t *testing.T) {\n\t\tfp, err := pipe.IncrByFloat(ctx, \"key\", 1)\n\t\trequire.NoError(t, err)\n\t\t_, err = fp.Result()\n\t\trequire.ErrorIs(t, ErrNotReady, err)\n\t})\n}\n\nfunc TestDMapPipeline_EmbeddedClient(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc := db.NewEmbeddedClient()\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tfutures := make(map[int]*FuturePut)\n\tpipe, err := dm.Pipeline()\n\trequire.NoError(t, err)\n\tdefer pipe.Close()\n\n\tfor i := 0; i < 100; i++ {\n\t\tfp, err := pipe.Put(ctx, testutil.ToKey(i), 
testutil.ToVal(i))\n\t\trequire.NoError(t, err)\n\t\tfutures[i] = fp\n\t}\n\terr = pipe.Exec(ctx)\n\trequire.NoError(t, err)\n\n\tfor _, fp := range futures {\n\t\trequire.NoError(t, fp.Result())\n\t}\n\n\tfor i := 0; i < 100; i++ {\n\t\tkey := testutil.ToKey(i)\n\t\tgr, err := dm.Get(ctx, key)\n\t\trequire.NoError(t, err)\n\n\t\tvalue, err := gr.Byte()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, testutil.ToVal(i), value)\n\t}\n}\n\nfunc TestDMapPipeline_setOrGetClusterClient(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc := db.NewEmbeddedClient()\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tdm, err := c.NewDMap(\"mydmap\")\n\trequire.NoError(t, err)\n\n\tpipeOne, err := dm.Pipeline()\n\trequire.NoError(t, err)\n\tdefer pipeOne.Close()\n\n\trequire.NotNil(t, dm.(*EmbeddedDMap).clusterClient)\n}\n"
  },
  {
    "path": "pkg/flog/flog.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n/*Package flog is a simple wrapper around Golang's log package which adds verbosity support.*/\npackage flog // import \"github.com/olric-data/olric/pkg/flog\"\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"path\"\n\t\"runtime\"\n\t\"sync/atomic\"\n)\n\n/*\nDerived from kubernetes/klog:\n  * flog.V(1) - Generally useful for this to ALWAYS be visible to an operator\n    * Programmer errors:\n    * Logging extra info about a panic\n    * CLI argument handling\n  * flog.V(2) - A reasonable default log level if you don't want verbosity.\n    * Information about config (listening on X, watching Y)\n    * Errors that repeat frequently that relate to conditions that can be corrected (pod detected as unhealthy)\n  * flog.V(3) - Useful steady state information about the service and important log messages that may correlate to\n    significant changes in the system.  
This is the recommended default log level for most systems.\n    * Logging HTTP requests and their exit code\n    * System state changing (killing pod)\n    * Controller state change events (starting pods)\n    * Scheduler log messages\n  * flog.V(4) - Extended information about changes\n    * More info about system state changes\n  * flog.V(5) - Debug level verbosity\n    * Logging in particularly thorny parts of code where you may want to come back later and check it\n  * flog.V(6) - Trace level verbosity\n    * Context to understand the steps leading up to errors and warnings\n    * More information for troubleshooting reported issues\n\n\tThe practical default level is V(2). Developers and QE environments may wish to run at V(3) or V(4).\n*/\n\n// A Logger represents an active logging instance that generates lines of\n// output to an io.Writer. Each logging operation makes a single call to\n// the Writer's Encode method. A Logger can be used simultaneously from\n// multiple goroutines; it guarantees to serialize access to the Writer.\ntype Logger struct {\n\tlogger      *log.Logger\n\tshowLineNum int32\n\tlevel       int32\n}\n\n// New returns a new Logger\nfunc New(logger *log.Logger) *Logger {\n\treturn &Logger{\n\t\tlogger: logger,\n\t}\n}\n\n// SetLevel sets verbosity level.\nfunc (f *Logger) SetLevel(level int32) {\n\tif level < 0 {\n\t\treturn\n\t}\n\tatomic.StoreInt32(&f.level, level)\n}\n\n// ShowLineNumber enables line number support if show is bigger than zero.\nfunc (f *Logger) ShowLineNumber(show int32) {\n\tif show < 0 {\n\t\treturn\n\t}\n\tatomic.StoreInt32(&f.showLineNum, show)\n}\n\n// Verbose is a type that implements Printf and Println with verbosity support.\ntype Verbose struct {\n\tok bool\n\tf  *Logger\n}\n\n// V reports whether verbosity at the call site is at least the requested level. 
The returned value is a struct\n// of type Verbose, which implements Printf and Println\nfunc (f *Logger) V(level int32) Verbose {\n\treturn Verbose{\n\t\tok: atomic.LoadInt32(&f.level) >= level,\n\t\tf:  f,\n\t}\n}\n\n// Ok will return true if this log level is enabled, guarded by the value of verbosity level.\nfunc (v Verbose) Ok() bool {\n\treturn v.ok\n}\n\n// Printf calls v.f.logger.Printf to print to the logger.\n// Arguments are handled in the manner of fmt.Printf.\nfunc (v Verbose) Printf(format string, i ...interface{}) {\n\tif !v.ok {\n\t\treturn\n\t}\n\tif atomic.LoadInt32(&v.f.showLineNum) != 1 {\n\t\tv.f.logger.Printf(format, i...)\n\t} else {\n\t\t_, fn, line, _ := runtime.Caller(1)\n\t\tv.f.logger.Printf(fmt.Sprintf(\"%s => %s:%d\", format, path.Base(fn), line), i...)\n\t}\n}\n\n// Println calls v.f.logger.Println to print to the logger.\n// Arguments are handled in the manner of fmt.Println.\nfunc (v Verbose) Println(i ...interface{}) {\n\tif !v.ok {\n\t\treturn\n\t}\n\tif atomic.LoadInt32(&v.f.showLineNum) != 1 {\n\t\tv.f.logger.Println(i...)\n\t} else {\n\t\t_, fn, line, _ := runtime.Caller(1)\n\t\tv.f.logger.Println(fmt.Sprintf(\"%s => %s:%d\", fmt.Sprint(i...), path.Base(fn), line))\n\t}\n}\n"
  },
  {
    "path": "pkg/neterrors/errors.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage neterrors\n\nimport (\n\t\"errors\"\n)\n\nvar (\n\tErrInvalidArgument  = errors.New(\"invalid argument\")\n\tErrUnknownOperation = errors.New(\"unknown operation\")\n\tErrInternalFailure  = errors.New(\"internal failure\")\n\tErrNotImplemented   = errors.New(\"not implemented\")\n\tErrOperationTimeout = errors.New(\"operation timeout\")\n)\n"
  },
  {
    "path": "pkg/service_discovery/service_discovery.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n/*Package service_discovery provides ServiceDiscovery interface for plugins*/\npackage service_discovery // import \"github.com/olric-data/olric/pkg/service_discovery\"\n\nimport \"log\"\n\n// ServiceDiscovery represents an interface for discovering, registering nodes within an Olric cluster.\ntype ServiceDiscovery interface {\n\n\t// Initialize prepares the service discovery plugin for use and ensures it is ready for further operations.\n\tInitialize() error\n\n\t// SetConfig sets the configuration for the service discovery plugin using the provided map of settings.\n\tSetConfig(c map[string]interface{}) error\n\n\t// SetLogger assigns a custom logger to the service discovery instance for logging operations.\n\tSetLogger(l *log.Logger)\n\n\t// Register registers the current node in the service discovery directory, enabling it to participate in the cluster.\n\tRegister() error\n\n\t// Deregister removes the current node from the service discovery directory and stops its participation in the cluster.\n\tDeregister() error\n\n\t// DiscoverPeers retrieves a list of available peers in the cluster and returns their addresses or an error if any occurs.\n\tDiscoverPeers() ([]string, error)\n\n\t// Close gracefully terminates all operations and releases resources associated with the service discovery instance.\n\tClose() error\n}\n"
  },
  {
    "path": "pkg/storage/config.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage storage\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n)\n\n// Config defines a new storage engine configuration\ntype Config struct {\n\tm map[string]interface{}\n\tsync.RWMutex\n}\n\n// NewConfig returns a new Config\nfunc NewConfig(cfg map[string]interface{}) *Config {\n\tif cfg == nil {\n\t\tcfg = make(map[string]interface{})\n\t}\n\treturn &Config{\n\t\tm: cfg,\n\t}\n}\n\n// Add adds a new key/value pair to Config\nfunc (c *Config) Add(key string, value interface{}) {\n\tc.Lock()\n\tdefer c.Unlock()\n\tc.m[key] = value\n}\n\n// Get loads a configuration variable with its key, otherwise it returns an error.\nfunc (c *Config) Get(key string) (interface{}, error) {\n\tc.Lock()\n\tdefer c.Unlock()\n\tvalue, ok := c.m[key]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"not found: %s\", key)\n\t}\n\treturn value, nil\n}\n\n// Delete deletes a configuration variable with its key.\nfunc (c *Config) Delete(key string) {\n\tc.Lock()\n\tdefer c.Unlock()\n\tdelete(c.m, key)\n}\n\n// Copy creates a thread-safe copy of the existing Config struct.\nfunc (c *Config) Copy() *Config {\n\tc.Lock()\n\tdefer c.Unlock()\n\tn := &Config{\n\t\tm: make(map[string]interface{}),\n\t}\n\tfor key, value := range c.m {\n\t\tn.m[key] = value\n\t}\n\treturn n\n}\n\n// ToMap casts Config to map[string]interface{} type.\nfunc (c *Config) ToMap() map[string]interface{} {\n\treturn 
c.Copy().m\n}\n"
  },
  {
    "path": "pkg/storage/config_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage storage\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc Test_Config(t *testing.T) {\n\tc := NewConfig(nil)\n\tc.Add(\"string-key\", \"string-value\")\n\tc.Add(\"integer-key\", 65786)\n\n\tt.Run(\"Get\", func(t *testing.T) {\n\t\tsv, err := c.Get(\"string-key\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Expected nil. Got %v\", err)\n\t\t}\n\t\tif sv.(string) != \"string-value\" {\n\t\t\tt.Fatalf(\"Expected string-value. Got %v\", sv)\n\t\t}\n\n\t\tiv, err := c.Get(\"integer-key\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Expected nil. Got %v\", err)\n\t\t}\n\t\tif iv.(int) != 65786 {\n\t\t\tt.Fatalf(\"Expected integer-value. Got %v\", iv)\n\t\t}\n\t})\n\n\tt.Run(\"Delete\", func(t *testing.T) {\n\t\tc.Delete(\"string-key\")\n\t\t_, err := c.Get(\"string-key\")\n\t\tif err == nil {\n\t\t\tt.Fatalf(\"Expected an error. Got %v\", err)\n\t\t}\n\t})\n\n\tt.Run(\"Copy\", func(t *testing.T) {\n\t\tcopied := c.Copy()\n\t\tif copied == c {\n\t\t\tt.Fatalf(\"New config is the same with the previous one\")\n\t\t}\n\t\tif !reflect.DeepEqual(c, copied) {\n\t\t\tt.Fatalf(\"New config is not idential with the previous one\")\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "pkg/storage/engine.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage storage\n\nimport (\n\t\"errors\"\n\t\"log\"\n)\n\n// ErrKeyTooLarge is an error that indicates the given key is larger than the determined key size.\n// The current maximum key length is 256.\nvar ErrKeyTooLarge = errors.New(\"key too large\")\n\n// ErrEntryTooLarge returned if required space for an entry is bigger than table size.\nvar ErrEntryTooLarge = errors.New(\"entry too large for the configured table size\")\n\n// ErrKeyNotFound is an error that indicates that the requested key could not be found in the DB.\nvar ErrKeyNotFound = errors.New(\"key not found\")\n\n// ErrNotImplemented means that the interface implementation does not support\n// the functionality required to fulfill the request.\nvar ErrNotImplemented = errors.New(\"not implemented yet\")\n\n// TransferIterator is an interface to implement iterators to encode and transfer\n// the underlying tables to another Olric member.\ntype TransferIterator interface {\n\t// Next returns true if there are more tables to Export in the storage instance.\n\t// Otherwise, it returns false.\n\tNext() bool\n\n\t// Export encodes a table and returns result. 
This encoded table can be moved to another Olric node.\n\tExport() ([]byte, int, error)\n\n\t// Drop drops a table with its index from the storage engine instance and frees allocated resources.\n\tDrop(int) error\n}\n\n// Engine defines methods for a storage engine implementation.\ntype Engine interface {\n\t// SetConfig sets a storage engine configuration. nil can be accepted, but\n\t// it depends on the implementation.\n\tSetConfig(*Config)\n\n\t// SetLogger sets a logger. nil can be accepted, but it depends on the implementation.\n\tSetLogger(*log.Logger)\n\n\t// Start can be used to run background services before starting operation.\n\tStart() error\n\n\t// NewEntry returns a new Entry interface implemented by the current storage\n\t// engine implementation.\n\tNewEntry() Entry\n\n\t// Name returns name of the current storage engine implementation.\n\tName() string\n\n\t// Fork creates an empty instance of an online engine by using the current\n\t// configuration.\n\tFork(*Config) (Engine, error)\n\n\t// PutRaw inserts an encoded entry into the storage engine.\n\tPutRaw(uint64, []byte) error\n\n\t// Put inserts a new Entry into the storage engine.\n\tPut(uint64, Entry) error\n\n\t// GetRaw reads an encoded entry from the storage engine.\n\tGetRaw(uint64) ([]byte, error)\n\n\t// Get reads an entry from the storage engine.\n\tGet(uint64) (Entry, error)\n\n\t// GetTTL extracts TTL of an entry.\n\tGetTTL(uint64) (int64, error)\n\n\t// GetLastAccess extracts LastAccess of an entry.\n\tGetLastAccess(uint64) (int64, error)\n\n\t// GetKey extracts key of an entry.\n\tGetKey(uint64) (string, error)\n\n\t// Delete deletes an entry from the storage engine.\n\tDelete(uint64) error\n\n\t// UpdateTTL updates TTL of an entry. 
It returns ErrKeyNotFound,\n\t// if the key doesn't exist.\n\tUpdateTTL(uint64, Entry) error\n\n\t// TransferIterator returns a new TransferIterator instance to the caller.\n\tTransferIterator() TransferIterator\n\n\t// Import imports an encoded table of the storage engine implementation and\n\t// calls f for every Entry item in that table.\n\tImport(data []byte, f func(uint64, Entry) error) error\n\n\t// Stats returns metrics for an online storage engine.\n\tStats() Stats\n\n\t// Check returns true, if the key exists.\n\tCheck(uint64) bool\n\n\t// Range implements a loop over the storage engine\n\tRange(func(uint64, Entry) bool)\n\n\t// RangeHKey implements a loop for hashed keys(HKeys).\n\tRangeHKey(func(uint64) bool)\n\n\t// Scan implements an iterator. The caller starts iterating from the cursor. \"count\" is the number of entries\n\t// that will be returned during the iteration. Scan calls the function \"f\" on Entry items for every iteration.\n\t//It returns the next cursor if everything is okay. Otherwise, it returns an error.\n\tScan(cursor uint64, count int, f func(Entry) bool) (uint64, error)\n\n\t// ScanRegexMatch is the same with the Scan method, but it supports regular expressions on keys.\n\tScanRegexMatch(cursor uint64, match string, count int, f func(Entry) bool) (uint64, error)\n\n\t// Compaction reorganizes storage tables and reclaims wasted resources.\n\tCompaction() (bool, error)\n\n\t// Close stops an online storage engine instance. It may free some of allocated\n\t// resources. A storage engine implementation should be started again, but it\n\t// depends on the implementation.\n\tClose() error\n\n\t// Destroy stops an online storage engine instance and frees allocated resources.\n\t// It should not be possible to reuse a destroyed storage engine.\n\tDestroy() error\n}\n"
  },
  {
    "path": "pkg/storage/entry.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage storage\n\n// Entry interface defines methods for a storage entry.\ntype Entry interface {\n\t// SetKey accepts strings as a key and inserts the key into the underlying\n\t// data structure.\n\tSetKey(string)\n\n\t// Key returns the key as string\n\tKey() string\n\n\t// SetValue accepts a byte slice as a value and inserts the value into the\n\t// underlying data structure.\n\tSetValue([]byte)\n\n\t// Value returns the value as a byte slice.\n\tValue() []byte\n\n\t// SetTTL sets TTL to an entry.\n\tSetTTL(int64)\n\n\t// TTL returns the current TTL for an entry.\n\tTTL() int64\n\n\t// SetTimestamp sets the current timestamp to an entry.\n\tSetTimestamp(int64)\n\n\t// Timestamp returns the current timestamp for an entry.\n\tTimestamp() int64\n\n\tSetLastAccess(int64)\n\n\tLastAccess() int64\n\n\t// Encode encodes an entry into a binary form and returns the result.\n\tEncode() []byte\n\n\t// Decode decodes a byte slice into an Entry.\n\tDecode([]byte)\n}\n"
  },
  {
    "path": "pkg/storage/stats.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage storage\n\n// Stats defines metrics exposed by a storage engine implementation.\ntype Stats struct {\n\t// Currently allocated memory by the engine.\n\tAllocated int\n\n\t// Used portion of allocated memory\n\tInuse int\n\n\t// Deleted portions of allocated memory.\n\tGarbage int\n\n\t// Total number of keys hosted by the engine instance.\n\tLength int\n\n\t// Number of tables hosted by the engine instance.\n\tNumTables int\n\n\t// Any other metrics that's specific to an engine implementation.\n\tExtras map[string]interface{}\n}\n"
  },
  {
    "path": "pubsub.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage olric\n\nimport (\n\t\"context\"\n\t\"strings\"\n\n\t\"github.com/olric-data/olric/internal/server\"\n\t\"github.com/redis/go-redis/v9\"\n)\n\ntype PubSub struct {\n\tconfig *pubsubConfig\n\trc     *redis.Client\n\tclient *server.Client\n}\n\nfunc newPubSub(client *server.Client, options ...PubSubOption) (*PubSub, error) {\n\tvar (\n\t\terr error\n\t\trc  *redis.Client\n\t\tpc  pubsubConfig\n\t)\n\tfor _, opt := range options {\n\t\topt(&pc)\n\t}\n\n\taddr := strings.Trim(pc.Address, \" \")\n\tif addr != \"\" {\n\t\trc = client.Get(addr)\n\t} else {\n\t\trc, err = client.Pick()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn &PubSub{\n\t\tconfig: &pc,\n\t\trc:     rc,\n\t\tclient: client,\n\t}, nil\n}\n\nfunc (ps *PubSub) Subscribe(ctx context.Context, channels ...string) *redis.PubSub {\n\treturn ps.rc.Subscribe(ctx, channels...)\n}\n\nfunc (ps *PubSub) PSubscribe(ctx context.Context, channels ...string) *redis.PubSub {\n\treturn ps.rc.PSubscribe(ctx, channels...)\n}\n\nfunc (ps *PubSub) Publish(ctx context.Context, channel string, message interface{}) (int64, error) {\n\treturn ps.rc.Publish(ctx, channel, message).Result()\n}\n\nfunc (ps *PubSub) PubSubChannels(ctx context.Context, pattern string) ([]string, error) {\n\treturn ps.rc.PubSubChannels(ctx, pattern).Result()\n}\n\nfunc (ps *PubSub) PubSubNumSub(ctx 
context.Context, channels ...string) (map[string]int64, error) {\n\treturn ps.rc.PubSubNumSub(ctx, channels...).Result()\n}\n\nfunc (ps *PubSub) PubSubNumPat(ctx context.Context) (int64, error) {\n\treturn ps.rc.PubSubNumPat(ctx).Result()\n}\n"
  },
  {
    "path": "pubsub_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage olric\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/redis/go-redis/v9\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc pubsubTestRunner(t *testing.T, ps *PubSub, kind, channel string) {\n\tctx := context.Background()\n\tvar rp *redis.PubSub\n\tswitch kind {\n\tcase \"subscribe\":\n\t\trp = ps.Subscribe(ctx, channel)\n\tcase \"psubscribe\":\n\t\trp = ps.PSubscribe(ctx, channel)\n\t}\n\n\tdefer func() {\n\t\trequire.NoError(t, rp.Close())\n\t}()\n\n\t// Wait for confirmation that subscription is created before publishing anything.\n\tmsgi, err := rp.ReceiveTimeout(ctx, time.Second)\n\trequire.NoError(t, err)\n\n\tsubs := msgi.(*redis.Subscription)\n\trequire.Equal(t, kind, subs.Kind)\n\trequire.Equal(t, channel, subs.Channel)\n\trequire.Equal(t, 1, subs.Count)\n\n\t// Go channel which receives messages.\n\tch := rp.Channel()\n\n\texpected := make(map[string]struct{})\n\tfor i := 0; i < 10; i++ {\n\t\tmsg := fmt.Sprintf(\"my-message-%d\", i)\n\t\tcount, err := ps.Publish(ctx, \"my-channel\", msg)\n\t\trequire.Equal(t, int64(1), count)\n\t\trequire.NoError(t, err)\n\t\texpected[msg] = struct{}{}\n\t}\n\n\tconsumed := make(map[string]struct{})\nL:\n\tfor {\n\t\tselect {\n\t\tcase msg := <-ch:\n\t\t\trequire.Equal(t, \"my-channel\", msg.Channel)\n\t\t\tconsumed[msg.Payload] = struct{}{}\n\t\t\tif 
len(consumed) == 10 {\n\t\t\t\t// It would be OK\n\t\t\t\tbreak L\n\t\t\t}\n\t\tcase <-time.After(5 * time.Second):\n\t\t\t// Enough. Break it and check the consumed items.\n\t\t\tbreak L\n\t\t}\n\t}\n\n\trequire.Equal(t, expected, consumed)\n}\n\nfunc TestPubSub_Publish_Subscribe(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tps, err := c.NewPubSub(ToAddress(db.rt.This().String()))\n\trequire.NoError(t, err)\n\n\tpubsubTestRunner(t, ps, \"subscribe\", \"my-channel\")\n}\n\nfunc TestPubSub_Publish_PSubscribe(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tps, err := c.NewPubSub(ToAddress(db.rt.This().String()))\n\trequire.NoError(t, err)\n\tpubsubTestRunner(t, ps, \"psubscribe\", \"my-*\")\n}\n\nfunc TestPubSub_PubSubChannels(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tps, err := c.NewPubSub(ToAddress(db.rt.This().String()))\n\trequire.NoError(t, err)\n\n\trp := ps.Subscribe(ctx, \"my-channel\")\n\n\tdefer func() {\n\t\trequire.NoError(t, rp.Close())\n\t}()\n\n\t// Wait for confirmation that subscription is created before publishing anything.\n\t_, err = rp.ReceiveTimeout(ctx, time.Second)\n\trequire.NoError(t, err)\n\n\tchannels, err := ps.PubSubChannels(ctx, \"my-*\")\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, []string{\"my-channel\"}, channels)\n}\n\nfunc TestPubSub_PubSubNumSub(t *testing.T) {\n\tcluster := 
newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tps, err := c.NewPubSub(ToAddress(db.rt.This().String()))\n\trequire.NoError(t, err)\n\n\trp := ps.Subscribe(ctx, \"my-channel\")\n\n\tdefer func() {\n\t\trequire.NoError(t, rp.Close())\n\t}()\n\n\t// Wait for confirmation that subscription is created before publishing anything.\n\t_, err = rp.ReceiveTimeout(ctx, time.Second)\n\trequire.NoError(t, err)\n\n\tnumsub, err := ps.PubSubNumSub(ctx, \"my-channel\", \"foobar\")\n\trequire.NoError(t, err)\n\n\texpected := map[string]int64{\n\t\t\"foobar\":     0,\n\t\t\"my-channel\": 1,\n\t}\n\trequire.Equal(t, expected, numsub)\n}\n\nfunc TestPubSub_PubSubNumPat(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tps, err := c.NewPubSub(ToAddress(db.rt.This().String()))\n\trequire.NoError(t, err)\n\n\trp := ps.PSubscribe(ctx, \"my-*\")\n\n\tdefer func() {\n\t\trequire.NoError(t, rp.Close())\n\t}()\n\n\t// Wait for confirmation that subscription is created before publishing anything.\n\t_, err = rp.ReceiveTimeout(ctx, time.Second)\n\trequire.NoError(t, err)\n\n\tnumpat, err := ps.PubSubNumPat(ctx)\n\trequire.NoError(t, err)\n\trequire.Equal(t, int64(1), numpat)\n}\n\nfunc TestPubSub_Cluster(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb1 := cluster.addMember(t)\n\tdb2 := cluster.addMember(t)\n\n\t// Create a subscriber\n\tctx := context.Background()\n\tc, err := NewClusterClient([]string{db1.name})\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, c.Close(ctx))\n\t}()\n\n\tps1, err := c.NewPubSub(ToAddress(db1.rt.This().String()))\n\trequire.NoError(t, err)\n\n\trp 
:= ps1.Subscribe(ctx, \"my-channel\")\n\tdefer func() {\n\t\trequire.NoError(t, rp.Close())\n\t}()\n\t// Wait for confirmation that subscription is created before publishing anything.\n\t_, err = rp.ReceiveTimeout(ctx, time.Second)\n\trequire.NoError(t, err)\n\treceiveChan := rp.Channel()\n\n\t// Create a publisher\n\n\te := db2.NewEmbeddedClient()\n\tps2, err := e.NewPubSub(ToAddress(db2.rt.This().String()))\n\trequire.NoError(t, err)\n\texpected := make(map[string]struct{})\n\tfor i := 0; i < 10; i++ {\n\t\tmsg := fmt.Sprintf(\"my-message-%d\", i)\n\t\tcount, err := ps2.Publish(ctx, \"my-channel\", msg)\n\t\trequire.Equal(t, int64(1), count)\n\t\trequire.NoError(t, err)\n\t\texpected[msg] = struct{}{}\n\t}\n\n\tconsumed := make(map[string]struct{})\nL:\n\tfor {\n\t\tselect {\n\t\tcase msg := <-receiveChan:\n\t\t\trequire.Equal(t, \"my-channel\", msg.Channel)\n\t\t\tconsumed[msg.Payload] = struct{}{}\n\t\t\tif len(consumed) == 10 {\n\t\t\t\t// It would be OK\n\t\t\t\tbreak L\n\t\t\t}\n\t\tcase <-time.After(5 * time.Second):\n\t\t\t// Enough. Break it and check the consumed items.\n\t\t\tbreak L\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "stats/stats.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n/*Package stats exposes internal data structures for Stat command*/\npackage stats\n\nimport \"runtime\"\n\ntype (\n\t// PartitionID denotes ID of a partition in the cluster.\n\tPartitionID uint64\n\n\t// MemberID denotes ID of a member in the cluster.\n\tMemberID uint64\n)\n\n// SlabInfo denotes memory usage of the storage engine(a hash indexed, append only byte slice).\ntype SlabInfo struct {\n\t// Total allocated space by the append-only byte slice.\n\tAllocated int `json:\"allocated\"`\n\n\t// Total inuse memory space in the append-only byte slice.\n\tInuse int `json:\"inuse\"`\n\n\t// Total garbage(deleted key/value pairs) space in the append-only byte slice.\n\tGarbage int `json:\"garbage\"`\n}\n\n// DMap denotes a distributed map instance on the cluster.\ntype DMap struct {\n\t// Number of keys in the DMap.\n\tLength int `json:\"length\"`\n\n\t// Statistics about memory representation of a DMap.\n\tSlabInfo SlabInfo `json:\"slab_info\"`\n\n\t// Number of tables in a storage instance.\n\tNumTables int `json:\"num_tables\"`\n}\n\n// Partition denotes a partition and its metadata in the cluster.\ntype Partition struct {\n\t// PreviousOwners is a list of members whose still owns some fragments.\n\tPreviousOwners []Member `json:\"previous_owners\"`\n\n\t// Backups is a list of members whose holds replicas of this partition.\n\tBackups 
[]Member `json:\"backups\"`\n\n\t// Total number of entries in the partition.\n\tLength int `json:\"length\"`\n\n\t// DMaps is a map that contains statistics of DMaps in this partition.\n\tDMaps map[string]DMap `json:\"dmaps\"`\n}\n\n// Runtime exposes memory stats and various metrics from Go runtime.\ntype Runtime struct {\n\t// GOOS is the running program's operating system target\n\tGOOS string `json:\"goos\"`\n\n\t// GOARCH is the running program's architecture target\n\tGOARCH string `json:\"goarch\"`\n\n\t// Version returns the Go tree's version string.\n\tVersion string `json:\"version\"`\n\n\t// NumCPU returns the number of logical CPUs usable by the current process.\n\tNumCPU int `json:\"num_cpu\"`\n\n\t// NumGoroutine returns the number of goroutines that currently exist.\n\tNumGoroutine int `json:\"num_goroutine\"`\n\n\t// MemStats records statistics about the memory allocator.\n\tMemStats runtime.MemStats `json:\"mem_stats\"`\n}\n\n// Member denotes a cluster member.\ntype Member struct {\n\t// Name is name of the node in the cluster.\n\tName string `json:\"name\"`\n\n\t// ID is the unique identifier of this node in the cluster. 
It's derived\n\t// from Name and Birthdate.\n\tID uint64 `json:\"id\"`\n\n\t// Birthdate is UNIX time in nanoseconds.\n\tBirthdate int64 `json:\"birthdate\"`\n}\n\n// String returns the member name.\nfunc (m Member) String() string {\n\treturn m.Name\n}\n\n// Network holds network statistics.\ntype Network struct {\n\t// ConnectionsTotal is total number of connections opened since the server started running.\n\tConnectionsTotal int64 `json:\"connections_total\"`\n\n\t// CurrentConnections is current number of open connections.\n\tCurrentConnections int64 `json:\"current_connections\"`\n\n\t// WrittenBytesTotal is total number of bytes sent by this server to network.\n\tWrittenBytesTotal int64 `json:\"written_bytes_total\"`\n\n\t// ReadBytesTotal is total number of bytes read by this server from network.\n\tReadBytesTotal int64 `json:\"read_bytes_total\"`\n\n\t// CommandsTotal is total number of all requests (get, put, etc.).\n\tCommandsTotal int64 `json:\"commands_total\"`\n}\n\n// DMaps holds global DMap statistics.\ntype DMaps struct {\n\t// EntriesTotal is the total number of entries(including replicas) stored during the life of this instance.\n\tEntriesTotal int64 `json:\"entries_total\"`\n\n\t// DeleteHits is the number of deletion reqs resulting in an item being removed.\n\tDeleteHits int64 `json:\"delete_hits\"`\n\n\t// DeleteMisses is the number of deletions reqs for missing keys\n\tDeleteMisses int64 `json:\"delete_misses\"`\n\n\t// GetMisses is the number of entries that have been requested and not found\n\tGetMisses int64 `json:\"get_misses\"`\n\n\t// GetHits is the number of entries that have been requested and found present\n\tGetHits int64 `json:\"get_hits\"`\n\n\t// EvictedTotal is the number of entries removed from cache to free memory for new entries.\n\tEvictedTotal int64 `json:\"evicted_total\"`\n}\n\n// PubSub holds global Pub/Sub statistics.\ntype PubSub struct {\n\t// PublishedTotal is the total number of published messages to PubSub during 
the life of this instance.\n\tPublishedTotal int64 `json:\"published_total\"`\n\n\t// CurrentSubscribers is the current number of Pub/Sub listeners of PubSub.\n\tCurrentSubscribers int64 `json:\"current_subscribers\"`\n\n\t// SubscribersTotal is the total number of registered Pub/Sub listeners during the life of this instance.\n\tSubscribersTotal int64 `json:\"subscribers_total\"`\n\n\t// CurrentSubscribers is the current number of Pub/Sub listeners of PubSub.\n\tCurrentPSubscribers int64 `json:\"current_psubscribers\"`\n\n\t// SubscribersTotal is the total number of registered Pub/Sub listeners during the life of this instance.\n\tPSubscribersTotal int64 `json:\"psubscribers_total\"`\n}\n\n// Stats is a struct that exposes statistics about the current state of a member.\ntype Stats struct {\n\t// Cmdline holds the command-line arguments, starting with the program name.\n\tCmdline []string `json:\"cmdline\"`\n\n\t// ReleaseVersion is the current Olric version\n\tReleaseVersion string `json:\"release_version\"`\n\n\t// UptimeSeconds is number of seconds since the server started.\n\tUptimeSeconds int64 `json:\"uptime_seconds\"`\n\n\t// Stats from Golang runtime\n\tRuntime *Runtime `json:\"runtime\"`\n\n\t// ClusterCoordinator is the current cluster coordinator.\n\tClusterCoordinator Member `json:\"cluster_coordinator\"`\n\n\t// Member denotes the current member.\n\tMember Member `json:\"member\"`\n\n\t// Partitions is a map that contains partition statistics.\n\tPartitions map[PartitionID]Partition `json:\"partitions\"`\n\n\t// Backups is a map that contains backup partition statistics.\n\tBackups map[PartitionID]Partition `json:\"backups\"`\n\n\t// ClusterMembers is a map that contains bootstrapped cluster members\n\tClusterMembers map[MemberID]Member `json:\"cluster_members\"`\n\n\t// Network holds network statistics.\n\tNetwork Network `json:\"network\"`\n\n\t// DMaps holds global DMap statistics.\n\tDMaps DMaps `json:\"dmaps\"`\n\n\t// PubSub holds global Pub/Sub 
statistics.\n\tPubSub PubSub `json:\"pub_sub\"`\n}\n"
  },
  {
    "path": "stats/stats_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage stats\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestMember_String(t *testing.T) {\n\tm := Member{\n\t\tName:      \"foobar\",\n\t\tID:        123345645678,\n\t\tBirthdate: time.Now().UnixNano(),\n\t}\n\trequire.Equal(t, \"foobar\", m.String())\n}\n"
  },
  {
    "path": "stats.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage olric\n\nimport (\n\t\"encoding/json\"\n\t\"os\"\n\t\"runtime\"\n\t\"strings\"\n\n\t\"github.com/olric-data/olric/internal/cluster/partitions\"\n\t\"github.com/olric-data/olric/internal/discovery\"\n\t\"github.com/olric-data/olric/internal/dmap\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/olric-data/olric/internal/pubsub\"\n\t\"github.com/olric-data/olric/internal/server\"\n\t\"github.com/olric-data/olric/stats\"\n\t\"github.com/tidwall/redcon\"\n)\n\nfunc toMember(member discovery.Member) stats.Member {\n\treturn stats.Member{\n\t\tName:      member.Name,\n\t\tID:        member.ID,\n\t\tBirthdate: member.Birthdate,\n\t}\n}\n\nfunc toMembers(members []discovery.Member) []stats.Member {\n\tvar _stats []stats.Member\n\tfor _, m := range members {\n\t\t_stats = append(_stats, toMember(m))\n\t}\n\treturn _stats\n}\n\nfunc (db *Olric) collectPartitionMetrics(partID uint64, part *partitions.Partition) stats.Partition {\n\towners := part.Owners()\n\tp := stats.Partition{\n\t\tBackups: toMembers(db.backup.PartitionOwnersByID(partID)),\n\t\tLength:  part.Length(),\n\t\tDMaps:   make(map[string]stats.DMap),\n\t}\n\tif len(owners) > 0 {\n\t\tp.PreviousOwners = toMembers(owners[:len(owners)-1])\n\t}\n\tpart.Map().Range(func(name, item interface{}) bool {\n\t\tf := item.(partitions.Fragment)\n\t\tst := f.Stats()\n\t\ttmp 
:= stats.DMap{\n\t\t\tLength:    st.Length,\n\t\t\tNumTables: st.NumTables,\n\t\t}\n\t\ttmp.SlabInfo.Allocated = st.Allocated\n\t\ttmp.SlabInfo.Garbage = st.Garbage\n\t\ttmp.SlabInfo.Inuse = st.Inuse\n\t\tdmapName := strings.TrimPrefix(name.(string), \"dmap.\")\n\t\tp.DMaps[dmapName] = tmp\n\t\treturn true\n\t})\n\treturn p\n}\n\nfunc (db *Olric) checkPartitionOwnership(part *partitions.Partition) bool {\n\towners := part.Owners()\n\tfor _, owner := range owners {\n\t\tif owner.CompareByID(db.rt.This()) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (db *Olric) stats(cfg statsConfig) stats.Stats {\n\ts := stats.Stats{\n\t\tCmdline:            os.Args,\n\t\tReleaseVersion:     ReleaseVersion,\n\t\tUptimeSeconds:      discovery.UptimeSeconds.Read(),\n\t\tClusterCoordinator: toMember(db.rt.Discovery().GetCoordinator()),\n\t\tMember:             toMember(db.rt.This()),\n\t\tPartitions:         make(map[stats.PartitionID]stats.Partition),\n\t\tBackups:            make(map[stats.PartitionID]stats.Partition),\n\t\tClusterMembers:     make(map[stats.MemberID]stats.Member),\n\t\tNetwork: stats.Network{\n\t\t\tConnectionsTotal:   server.ConnectionsTotal.Read(),\n\t\t\tCurrentConnections: server.CurrentConnections.Read(),\n\t\t\tWrittenBytesTotal:  server.WrittenBytesTotal.Read(),\n\t\t\tReadBytesTotal:     server.ReadBytesTotal.Read(),\n\t\t\tCommandsTotal:      server.CommandsTotal.Read(),\n\t\t},\n\t\tDMaps: stats.DMaps{\n\t\t\tEntriesTotal: dmap.EntriesTotal.Read(),\n\t\t\tDeleteHits:   dmap.DeleteHits.Read(),\n\t\t\tDeleteMisses: dmap.DeleteMisses.Read(),\n\t\t\tGetMisses:    dmap.GetMisses.Read(),\n\t\t\tGetHits:      dmap.GetHits.Read(),\n\t\t\tEvictedTotal: dmap.EvictedTotal.Read(),\n\t\t},\n\t\tPubSub: stats.PubSub{\n\t\t\tPublishedTotal:      pubsub.PublishedTotal.Read(),\n\t\t\tCurrentSubscribers:  pubsub.CurrentSubscribers.Read(),\n\t\t\tSubscribersTotal:    pubsub.SubscribersTotal.Read(),\n\t\t\tCurrentPSubscribers: 
pubsub.CurrentPSubscribers.Read(),\n\t\t\tPSubscribersTotal:   pubsub.PSubscribersTotal.Read(),\n\t\t},\n\t}\n\n\tif cfg.CollectRuntime {\n\t\ts.Runtime = &stats.Runtime{\n\t\t\tGOOS:         runtime.GOOS,\n\t\t\tGOARCH:       runtime.GOARCH,\n\t\t\tVersion:      runtime.Version(),\n\t\t\tNumCPU:       runtime.NumCPU(),\n\t\t\tNumGoroutine: runtime.NumGoroutine(),\n\t\t}\n\t\truntime.ReadMemStats(&s.Runtime.MemStats)\n\t}\n\n\tdb.rt.RLock()\n\tdefer db.rt.RUnlock()\n\n\tdb.rt.Members().Range(func(id uint64, member discovery.Member) bool {\n\t\ts.ClusterMembers[stats.MemberID(id)] = toMember(member)\n\t\treturn true\n\t})\n\n\tfor partID := uint64(0); partID < db.config.PartitionCount; partID++ {\n\t\tprimary := db.primary.PartitionByID(partID)\n\t\tif db.checkPartitionOwnership(primary) {\n\t\t\ts.Partitions[stats.PartitionID(partID)] = db.collectPartitionMetrics(partID, primary)\n\t\t}\n\t\tbackup := db.backup.PartitionByID(partID)\n\t\tif db.checkPartitionOwnership(backup) {\n\t\t\ts.Backups[stats.PartitionID(partID)] = db.collectPartitionMetrics(partID, backup)\n\t\t}\n\t}\n\n\treturn s\n}\n\nfunc (db *Olric) statsCommandHandler(conn redcon.Conn, cmd redcon.Command) {\n\tstatsCmd, err := protocol.ParseStatsCommand(cmd)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\n\tsc := statsConfig{}\n\tif statsCmd.CollectRuntime {\n\t\tsc.CollectRuntime = true\n\t}\n\tmemberStats := db.stats(sc)\n\tdata, err := json.Marshal(memberStats)\n\tif err != nil {\n\t\tprotocol.WriteError(conn, err)\n\t\treturn\n\t}\n\tconn.WriteBulk(data)\n}\n"
  },
  {
    "path": "stats_test.go",
    "content": "// Copyright 2018-2025 The Olric Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage olric\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/olric-data/olric/internal/dmap\"\n\t\"github.com/olric-data/olric/internal/protocol\"\n\t\"github.com/olric-data/olric/internal/pubsub\"\n\t\"github.com/olric-data/olric/internal/testutil\"\n\t\"github.com/olric-data/olric/stats\"\n\t\"github.com/redis/go-redis/v9\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc resetPubSubStats() {\n\tpubsub.SubscribersTotal.Reset()\n\tpubsub.CurrentPSubscribers.Reset()\n\tpubsub.CurrentSubscribers.Reset()\n\tpubsub.PSubscribersTotal.Reset()\n\tpubsub.PublishedTotal.Reset()\n}\n\nfunc TestOlric_Stats(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\tc := db.NewEmbeddedClient()\n\tdm, err := c.NewDMap(\"mymap\")\n\trequire.NoError(t, err)\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tfor i := 0; i < 100; i++ {\n\t\terr = dm.Put(ctx, testutil.ToKey(i), testutil.ToVal(i))\n\t\trequire.NoError(t, err)\n\t}\n\n\ts, err := c.Stats(ctx, db.rt.This().String())\n\trequire.NoError(t, err)\n\n\tif s.ClusterCoordinator.ID != db.rt.This().ID {\n\t\tt.Fatalf(\"Expected cluster coordinator: %v. 
Got: %v\", db.rt.This(), s.ClusterCoordinator)\n\t}\n\n\trequire.Equal(t, s.Member.Name, db.rt.This().Name)\n\trequire.Equal(t, s.Member.ID, db.rt.This().ID)\n\trequire.Equal(t, s.Member.Birthdate, db.rt.This().Birthdate)\n\tif s.Runtime != nil {\n\t\tt.Error(\"Runtime stats must not be collected by default:\", s.Runtime)\n\t}\n\n\tvar total int\n\tfor partID, part := range s.Partitions {\n\t\ttotal += part.Length\n\t\tif _, ok := part.DMaps[\"mymap\"]; !ok {\n\t\t\tt.Fatalf(\"Expected dmap check result is true. Got false\")\n\t\t}\n\t\tif len(part.PreviousOwners) != 0 {\n\t\t\tt.Fatalf(\"Expected PreviousOwners list is empty. \"+\n\t\t\t\t\"Got: %v for PartID: %d\", part.PreviousOwners, partID)\n\t\t}\n\t\tif part.Length <= 0 {\n\t\t\tt.Fatalf(\"Unexpected Length: %d\", part.Length)\n\t\t}\n\t}\n\tif total != 100 {\n\t\tt.Fatalf(\"Expected total length of partition in stats is 100. Got: %d\", total)\n\t}\n\t_, ok := s.ClusterMembers[stats.MemberID(db.rt.This().ID)]\n\tif !ok {\n\t\tt.Fatalf(\"Expected member ID: %d could not be found in ClusterMembers\", db.rt.This().ID)\n\t}\n}\n\nfunc TestOlric_Stats_CollectRuntime(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\ts, err := e.Stats(context.Background(), db.rt.This().String(), CollectRuntime())\n\trequire.NoError(t, err)\n\n\tif s.Runtime == nil {\n\t\tt.Fatal(\"Runtime stats must be collected when the CollectRuntime option is passed:\", s.Runtime)\n\t}\n}\n\nfunc TestOlric_Stats_Cluster(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\tdb2 := cluster.addMember(t)\n\n\te := db.NewEmbeddedClient()\n\ts, err := e.Stats(context.Background(), db2.rt.This().String())\n\trequire.NoError(t, err)\n\trequire.Nil(t, s.Runtime)\n\trequire.Equal(t, s.Member.String(), db2.rt.This().String())\n}\n\nfunc TestStats_PubSub(t *testing.T) {\n\tresetPubSubStats()\n\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\trc := 
redis.NewClient(&redis.Options{Addr: db.rt.This().String()})\n\tctx := context.Background()\n\n\tt.Run(\"Subscribe\", func(t *testing.T) {\n\t\tdefer func() {\n\t\t\tresetPubSubStats()\n\t\t}()\n\n\t\tvar subscribers []*redis.PubSub\n\t\tfor i := 0; i < 5; i++ {\n\t\t\tps := rc.Subscribe(ctx, \"my-channel\")\n\t\t\t// Wait for confirmation that subscription is created before publishing anything.\n\t\t\t_, err := ps.Receive(ctx)\n\t\t\trequire.NoError(t, err)\n\t\t\tsubscribers = append(subscribers, ps)\n\t\t}\n\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tcmd := rc.Publish(ctx, \"my-channel\", fmt.Sprintf(\"message-%d\", i))\n\t\t\tres, err := cmd.Result()\n\t\t\trequire.Equal(t, int64(5), res)\n\t\t\trequire.NoError(t, err)\n\t\t}\n\t\trequire.Equal(t, int64(50), pubsub.PublishedTotal.Read())\n\t\trequire.Equal(t, int64(5), pubsub.SubscribersTotal.Read())\n\t\trequire.Equal(t, int64(5), pubsub.CurrentSubscribers.Read())\n\t\trequire.Equal(t, int64(0), pubsub.PSubscribersTotal.Read())\n\t\trequire.Equal(t, int64(0), pubsub.CurrentPSubscribers.Read())\n\n\t\t// Unsubscribe\n\t\tfor _, s := range subscribers {\n\t\t\terr := s.Unsubscribe(ctx, \"my-channel\")\n\t\t\trequire.NoError(t, err)\n\t\t\t<-time.After(100 * time.Millisecond)\n\t\t}\n\n\t\trequire.Equal(t, int64(5), pubsub.SubscribersTotal.Read())\n\t\trequire.Equal(t, int64(0), pubsub.CurrentSubscribers.Read())\n\t\trequire.Equal(t, int64(0), pubsub.PSubscribersTotal.Read())\n\t\trequire.Equal(t, int64(0), pubsub.CurrentPSubscribers.Read())\n\t})\n\n\tt.Run(\"PSubscribe\", func(t *testing.T) {\n\t\tdefer func() {\n\t\t\tresetPubSubStats()\n\t\t}()\n\n\t\tps := rc.PSubscribe(ctx, \"h?llo\")\n\t\t// Wait for confirmation that subscription is created before publishing anything.\n\t\t_, err := ps.Receive(ctx)\n\t\trequire.NoError(t, err)\n\n\t\tcmd := rc.Publish(ctx, \"hxllo\", \"message\")\n\t\tres, err := cmd.Result()\n\t\trequire.Equal(t, int64(1), res)\n\t\trequire.NoError(t, err)\n\n\t\trequire.Equal(t, int64(1), 
pubsub.PublishedTotal.Read())\n\t\trequire.Equal(t, int64(1), pubsub.PSubscribersTotal.Read())\n\t\trequire.Equal(t, int64(1), pubsub.CurrentPSubscribers.Read())\n\n\t\trequire.Equal(t, int64(0), pubsub.SubscribersTotal.Read())\n\t\trequire.Equal(t, int64(0), pubsub.CurrentSubscribers.Read())\n\n\t\terr = ps.PUnsubscribe(ctx, \"h?llo\")\n\t\trequire.NoError(t, err)\n\n\t\t<-time.After(100 * time.Millisecond)\n\t\trequire.Equal(t, int64(1), pubsub.PSubscribersTotal.Read())\n\t\trequire.Equal(t, int64(0), pubsub.CurrentPSubscribers.Read())\n\t\trequire.Equal(t, int64(0), pubsub.SubscribersTotal.Read())\n\t\trequire.Equal(t, int64(0), pubsub.CurrentSubscribers.Read())\n\t})\n}\n\nfunc TestStats_DMap(t *testing.T) {\n\tcluster := newTestOlricCluster(t)\n\tdb := cluster.addMember(t)\n\n\trc := redis.NewClient(&redis.Options{Addr: db.rt.This().String()})\n\tctx := context.Background()\n\n\tt.Run(\"DMap stats without eviction\", func(t *testing.T) {\n\t\t// EntriesTotal\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tcmd := protocol.NewPut(\"mydmap\", fmt.Sprintf(\"mykey-%d\", i), []byte(\"myvalue\")).Command(ctx)\n\t\t\terr := rc.Process(ctx, cmd)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NoError(t, cmd.Err())\n\t\t}\n\n\t\t// GetHits\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tcmd := protocol.NewGet(\"mydmap\", fmt.Sprintf(\"mykey-%d\", i)).Command(ctx)\n\t\t\terr := rc.Process(ctx, cmd)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NoError(t, cmd.Err())\n\t\t}\n\n\t\t// DeleteHits\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tcmd := protocol.NewDel(\"mydmap\", fmt.Sprintf(\"mykey-%d\", i)).Command(ctx)\n\t\t\terr := rc.Process(ctx, cmd)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NoError(t, cmd.Err())\n\t\t}\n\n\t\t// GetMisses\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tcmd := protocol.NewGet(\"mydmap\", fmt.Sprintf(\"mykey-%d\", i)).Command(ctx)\n\t\t\terr := rc.Process(ctx, cmd)\n\t\t\terr = protocol.ConvertError(err)\n\t\t\trequire.ErrorIs(t, err, dmap.ErrKeyNotFound)\n\t\t}\n\n\t\t// 
DeleteMisses\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tcmd := protocol.NewDel(\"mydmap\", fmt.Sprintf(\"mykey-%d\", i)).Command(ctx)\n\t\t\terr := rc.Process(ctx, cmd)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NoError(t, cmd.Err())\n\t\t}\n\n\t\trequire.GreaterOrEqual(t, dmap.EntriesTotal.Read(), int64(10))\n\t\trequire.GreaterOrEqual(t, dmap.GetMisses.Read(), int64(10))\n\t\trequire.GreaterOrEqual(t, dmap.GetHits.Read(), int64(10))\n\t\trequire.GreaterOrEqual(t, dmap.DeleteHits.Read(), int64(10))\n\t\trequire.GreaterOrEqual(t, dmap.DeleteMisses.Read(), int64(10))\n\t})\n\n\tt.Run(\"DMap eviction stats\", func(t *testing.T) {\n\t\t// EntriesTotal, EvictedTotal\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tcmd := protocol.\n\t\t\t\tNewPut(\"mydmap\", fmt.Sprintf(\"mykey-%d\", i), []byte(\"myvalue\")).\n\t\t\t\tSetPX(time.Millisecond.Milliseconds()).\n\t\t\t\tCommand(ctx)\n\t\t\terr := rc.Process(ctx, cmd)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NoError(t, cmd.Err())\n\t\t}\n\t\t<-time.After(100 * time.Millisecond)\n\n\t\t// GetMisses\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tcmd := protocol.NewGet(\"mydmap\", \"mykey\").Command(ctx)\n\t\t\terr := rc.Process(ctx, cmd)\n\t\t\terr = protocol.ConvertError(err)\n\t\t\trequire.ErrorIs(t, err, dmap.ErrKeyNotFound)\n\t\t}\n\n\t\trequire.Greater(t, dmap.DeleteHits.Read(), int64(0))\n\t\trequire.Greater(t, dmap.EvictedTotal.Read(), int64(0))\n\t\trequire.GreaterOrEqual(t, dmap.EntriesTotal.Read(), int64(10))\n\t})\n}\n"
  }
]