[
  {
    "path": ".editorconfig",
    "content": "root = true\n\n[*]\nindent_style = space\nindent_size = 4\nend_of_line = lf\ninsert_final_newline = true\ntrim_trailing_whitespace = true\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/issue-template.md",
    "content": "---\nname: Issue Template\nabout: Template for reporting general issues with the library\ntitle: ''\nlabels: 0 - new\nassignees: ''\n\n---\n\n### Expected behavior\n<!-- _what you expected to happen_ -->\n\n### Actual behavior\n<!-- _what actually happened_ -->\n\n### Steps to reproduce\n\n<!-- _steps to reproduce the issue, or link to example app / reproducer_ -->\n\n### If possible, minimal yet complete reproducer code (or URL to code)\n\n<!-- _anything to help us reproduce the issue_ -->\n\n### Version/commit hash\n\n<!-- _the tag/commit hash_ -->\n\n### Swift & OS version (output of `swift --version && uname -a`)\n"
  },
  {
    "path": ".github/PULL_REQUEST_TEMPLATE.md",
    "content": "_[One line description of your change]_\n\n### Motivation:\n\n_[Explain here the context, and why you're making that change. What is the problem you're trying to solve.]_\n\n### Modifications:\n\n_[Describe the modifications you've done.]_\n\n### Result:\n\n- Resolves #\n- _[After your change, what will change.]_\n"
  },
  {
    "path": ".github/dependabot.yml",
    "content": "version: 2\nupdates:\n  - package-ecosystem: \"github-actions\"\n    directory: \"/\"\n    schedule:\n      interval: \"weekly\"\n"
  },
  {
    "path": ".github/release.yml",
    "content": "changelog:\n  categories:\n    - title: SemVer Major\n      labels:\n        - ⚠️ semver/major\n    - title: SemVer Minor\n      labels:\n        - 🆕 semver/minor\n    - title: SemVer Patch\n      labels:\n        - 🔨 semver/patch\n    - title: Other Changes\n      labels:\n        - semver/none\n"
  },
  {
    "path": ".github/workflows/main.yml",
    "content": "name: Main\n\npermissions:\n  contents: read\n\non:\n  push:\n    branches: [main]\n  schedule:\n    - cron: \"0 8,20 * * *\"\n\njobs:\n  unit-tests:\n    name: Unit tests\n    uses: apple/swift-nio/.github/workflows/unit_tests.yml@main\n    with:\n      linux_5_10_arguments_override: \"--explicit-target-dependency-import-check error\"\n      linux_6_0_arguments_override: \"--explicit-target-dependency-import-check error -Xswiftc -require-explicit-sendable\"\n      linux_6_1_arguments_override: \"--explicit-target-dependency-import-check error -Xswiftc -require-explicit-sendable\"\n      linux_6_2_arguments_override: \"--explicit-target-dependency-import-check error -Xswiftc -require-explicit-sendable\"\n      linux_6_3_arguments_override: \"--explicit-target-dependency-import-check error -Xswiftc -require-explicit-sendable\"\n      linux_nightly_next_arguments_override: \"--explicit-target-dependency-import-check error -Xswiftc -require-explicit-sendable\"\n      linux_nightly_main_arguments_override: \"--explicit-target-dependency-import-check error -Xswiftc -require-explicit-sendable\"\n\n  construct-samples-matrix:\n    name: Construct samples matrix\n    runs-on: ubuntu-latest\n    outputs:\n      samples-matrix: '${{ steps.generate-matrix.outputs.samples-matrix }}'\n    steps:\n      - name: Checkout repository\n        uses: actions/checkout@v6\n        with:\n          persist-credentials: false\n      - id: generate-matrix\n        run: echo \"samples-matrix=$(curl -s https://raw.githubusercontent.com/apple/swift-nio/main/scripts/generate_matrix.sh | bash)\" >> \"$GITHUB_OUTPUT\"\n        env:\n          MATRIX_LINUX_COMMAND: \"swift build --package-path Samples --explicit-target-dependency-import-check error\"\n\n  samples:\n    name: Samples\n    needs: construct-samples-matrix\n    uses: apple/swift-nio/.github/workflows/swift_test_matrix.yml@main\n    with:\n      name: \"Samples\"\n      matrix_string: '${{ 
needs.construct-samples-matrix.outputs.samples-matrix }}'\n\n  macos-tests:\n    name: macOS tests\n    uses: apple/swift-nio/.github/workflows/macos_tests.yml@main\n    with:\n      build_scheme: \"none\"  # no defined build schemes\n      macos_xcode_build_enabled: false\n      ios_xcode_build_enabled: false\n      watchos_xcode_build_enabled: false\n      tvos_xcode_build_enabled: false\n      visionos_xcode_build_enabled: false\n\n  static-sdk:\n    name: Static SDK\n    # Workaround https://github.com/nektos/act/issues/1875\n    uses: apple/swift-nio/.github/workflows/static_sdk.yml@main\n\n  release-builds:\n    name: Release builds\n    uses: apple/swift-nio/.github/workflows/release_builds.yml@main\n"
  },
  {
    "path": ".github/workflows/pull_request.yml",
    "content": "name: PR\n\npermissions:\n  contents: read\n\non:\n  pull_request:\n    types: [opened, reopened, synchronize]\n\njobs:\n  soundness:\n    name: Soundness\n    uses: swiftlang/github-workflows/.github/workflows/soundness.yml@0.0.10\n    with:\n      license_header_check_project_name: \"Swift Cluster Membership\"\n\n  unit-tests:\n    name: Unit tests\n    uses: apple/swift-nio/.github/workflows/unit_tests.yml@main\n    with:\n      linux_5_10_arguments_override: \"--explicit-target-dependency-import-check error\"\n      linux_6_0_arguments_override: \"--explicit-target-dependency-import-check error -Xswiftc -require-explicit-sendable\"\n      linux_6_1_arguments_override: \"--explicit-target-dependency-import-check error -Xswiftc -require-explicit-sendable\"\n      linux_6_2_arguments_override: \"--explicit-target-dependency-import-check error -Xswiftc -require-explicit-sendable\"\n      linux_6_3_arguments_override: \"--explicit-target-dependency-import-check error -Xswiftc -require-explicit-sendable\"\n      linux_nightly_next_arguments_override: \"--explicit-target-dependency-import-check error -Xswiftc -require-explicit-sendable\"\n      linux_nightly_main_arguments_override: \"--explicit-target-dependency-import-check error -Xswiftc -require-explicit-sendable\"\n\n  construct-samples-matrix:\n    name: Construct samples matrix\n    runs-on: ubuntu-latest\n    outputs:\n      samples-matrix: '${{ steps.generate-matrix.outputs.samples-matrix }}'\n    steps:\n      - name: Checkout repository\n        uses: actions/checkout@v6\n        with:\n          persist-credentials: false\n      - id: generate-matrix\n        run: echo \"samples-matrix=$(curl -s https://raw.githubusercontent.com/apple/swift-nio/main/scripts/generate_matrix.sh | bash)\" >> \"$GITHUB_OUTPUT\"\n        env:\n          MATRIX_LINUX_COMMAND: \"swift build --package-path Samples --explicit-target-dependency-import-check error\"\n\n  samples:\n    name: Samples\n    needs: 
construct-samples-matrix\n    uses: apple/swift-nio/.github/workflows/swift_test_matrix.yml@main\n    with:\n      name: \"Samples\"\n      matrix_string: '${{ needs.construct-samples-matrix.outputs.samples-matrix }}'\n\n  cxx-interop:\n    name: Cxx interop\n    uses: apple/swift-nio/.github/workflows/cxx_interop.yml@main\n\n  static-sdk:\n    name: Static SDK\n    # Workaround https://github.com/nektos/act/issues/1875\n    uses: apple/swift-nio/.github/workflows/static_sdk.yml@main\n\n  release-builds:\n    name: Release builds\n    uses: apple/swift-nio/.github/workflows/release_builds.yml@main\n"
  },
  {
    "path": ".github/workflows/pull_request_label.yml",
    "content": "name: PR label\n\npermissions:\n  contents: read\n\non:\n  pull_request:\n    types: [labeled, unlabeled, opened, reopened, synchronize]\n\njobs:\n  semver-label-check:\n    name: Semantic version label check\n    runs-on: ubuntu-latest\n    timeout-minutes: 1\n    steps:\n      - name: Checkout repository\n        uses: actions/checkout@v6\n        with:\n          persist-credentials: false\n      - name: Check for Semantic Version label\n        uses: apple/swift-nio/.github/actions/pull_request_semver_label_checker@main\n"
  },
  {
    "path": ".gitignore",
    "content": ".DS_Store\n.swift-version\n\n*.orig\n*.app\n\n/.build\n/Samples/.build\n/.SourceKitten\n/Packages\n.xcode\n/*.xcodeproj\nSamples/swift-cluster-membership-samples.xcodeproj\n.idea\n\n# rendered docs output dirs\n/reference/\n/api/\n.swiftpm/\nPackage.resolved\n"
  },
  {
    "path": ".licenseignore",
    "content": ".gitignore\n**/.gitignore\n.licenseignore\n.gitattributes\n.git-blame-ignore-revs\n.mailfilter\n.mailmap\n.spi.yml\n.swift-format\n.editorconfig\n.github/*\n*.md\n*.txt\n*.yml\n*.yaml\n*.json\nPackage.swift\n**/Package.swift\nPackage@swift-*.swift\n**/Package@swift-*.swift\nPackage.resolved\n**/Package.resolved\nMakefile\n*.modulemap\n**/*.modulemap\n**/*.docc/*\n*.xcprivacy\n**/*.xcprivacy\n*.symlink\n**/*.symlink\nDockerfile\n**/Dockerfile\nSnippets/*\ndev/git.commit.template\n.unacceptablelanguageignore\nIntegrationTests/*.sh\nSources/SWIM/Utils/Heap.swift\nTests/SWIMTests/HeapTests.swift\n"
  },
  {
    "path": ".spi.yml",
    "content": "version: 1\nbuilder:\n  configs:\n    - documentation_targets: [ClusterMembership, SWIM]\n"
  },
  {
    "path": ".swift-format",
    "content": "{\n\n\n   \"version\" : 1,\n   \"indentation\" : {\n     \"spaces\" : 4\n   },\n   \"tabWidth\" : 4,\n   \"fileScopedDeclarationPrivacy\" : {\n     \"accessLevel\" : \"private\"\n   },\n   \"spacesAroundRangeFormationOperators\" : false,\n   \"indentConditionalCompilationBlocks\" : false,\n   \"indentSwitchCaseLabels\" : false,\n   \"lineBreakAroundMultilineExpressionChainComponents\" : false,\n   \"lineBreakBeforeControlFlowKeywords\" : false,\n   \"lineBreakBeforeEachArgument\" : true,\n   \"lineBreakBeforeEachGenericRequirement\" : true,\n   \"lineLength\" : 240,\n   \"maximumBlankLines\" : 1,\n   \"respectsExistingLineBreaks\" : true,\n   \"prioritizeKeepingFunctionOutputTogether\" : true,\n   \"noAssignmentInExpressions\" : {\n     \"allowedFunctions\" : [\n       \"XCTAssertNoThrow\",\n       \"XCTAssertThrowsError\"\n     ]\n   },\n   \"rules\" : {\n     \"AllPublicDeclarationsHaveDocumentation\" : false,\n     \"AlwaysUseLiteralForEmptyCollectionInit\" : false,\n     \"AlwaysUseLowerCamelCase\" : false,\n     \"AmbiguousTrailingClosureOverload\" : true,\n     \"BeginDocumentationCommentWithOneLineSummary\" : false,\n     \"DoNotUseSemicolons\" : true,\n     \"DontRepeatTypeInStaticProperties\" : true,\n     \"FileScopedDeclarationPrivacy\" : true,\n     \"FullyIndirectEnum\" : true,\n     \"GroupNumericLiterals\" : true,\n     \"IdentifiersMustBeASCII\" : true,\n     \"NeverForceUnwrap\" : false,\n     \"NeverUseForceTry\" : false,\n     \"NeverUseImplicitlyUnwrappedOptionals\" : false,\n     \"NoAccessLevelOnExtensionDeclaration\" : true,\n     \"NoAssignmentInExpressions\" : true,\n     \"NoBlockComments\" : true,\n     \"NoCasesWithOnlyFallthrough\" : true,\n     \"NoEmptyTrailingClosureParentheses\" : true,\n     \"NoLabelsInCasePatterns\" : true,\n     \"NoLeadingUnderscores\" : false,\n     \"NoParensAroundConditions\" : true,\n     \"NoVoidReturnOnFunctionSignature\" : true,\n     \"OmitExplicitReturns\" : true,\n     \"OneCasePerLine\" : true,\n     \"OneVariableDeclarationPerLine\" : true,\n     \"OnlyOneTrailingClosureArgument\" : true,\n     \"OrderedImports\" : true,\n     \"ReplaceForEachWithForLoop\" : true,\n     \"ReturnVoidInsteadOfEmptyTuple\" : true,\n     \"UseEarlyExits\" : false,\n     \"UseExplicitNilCheckInConditions\" : false,\n     \"UseLetInEveryBoundCaseVariable\" : false,\n     \"UseShorthandTypeNames\" : true,\n     \"UseSingleLinePropertyGetter\" : false,\n     \"UseSynthesizedInitializer\" : false,\n     \"UseTripleSlashForDocumentationComments\" : true,\n     \"UseWhereClausesInForLoops\" : false,\n     \"ValidateDocumentationComments\" : false\n   }\n }\n"
  },
  {
    "path": "CODE_OF_CONDUCT.md",
    "content": "# Code of Conduct\n\nThe code of conduct for this project can be found at https://swift.org/code-of-conduct.\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "## Legal\n\nBy submitting a pull request, you represent that you have the right to license\nyour contribution to Apple and the community, and agree by submitting the patch\nthat your contributions are licensed under the Apache 2.0 license (see\n`LICENSE.txt`).\n\n\n## How to submit a bug report\n\nPlease ensure to specify the following:\n\n* Swift Cluster Membership commit hash\n* Contextual information (e.g. what you were trying to achieve with Swift Cluster Membership)\n* Simplest possible steps to reproduce\n  * More complex the steps are, lower the priority will be.\n  * A pull request with failing test case is preferred, but it's just fine to paste the test case into the issue description.\n* Anything that might be relevant in your opinion, such as:\n  * Swift version or the output of `swift --version`\n  * OS version and the output of `uname -a`\n  * Network configuration\n\n\n### Example\n\n```\nSwift Cluster Membership commit hash: 22ec043dc9d24bb011b47ece4f9ee97ee5be2757\n\nContext:\nWhile load testing my HTTP web server written with Swift Cluster Membership, I noticed\nthat one file descriptor is leaked per request.\n\nSteps to reproduce:\n1. ...\n2. ...\n3. ...\n4. ...\n\n$ swift --version\nSwift version 4.0.2 (swift-4.0.2-RELEASE)\nTarget: x86_64-unknown-linux-gnu\n\nOperating system: Ubuntu Linux 16.04 64-bit\n\n$ uname -a\nLinux beefy.machine 4.4.0-101-generic #124-Ubuntu SMP Fri Nov 10 18:29:59 UTC 2017 x86_64 x86_64 x86_64 GNU/Linux\n\nMy system has IPv6 disabled.\n```\n\n## Writing a Patch\n\nA good Swift Cluster Membership patch is:\n\n1. Concise, and contains as few changes as needed to achieve the end result.\n2. Tested, ensuring that any tests provided failed before the patch and pass after it.\n3. Documented, adding API documentation as needed to cover new functions and properties.\n4. Adheres to our code formatting conventions and [style guide](STYLE_GUIDE.md).\n5. 
Accompanied by a great commit message, using our commit message template.\n\n### Code Format and Style\n\nSwift Cluster Membership uses [swift-format](https://github.com/swiftlang/swift-format) to enforce the preferred [swift code format](.swift-format). Always run swift-format before committing your code.\n\n### Commit Message Template\n\nWe require that your commit messages match our template. The easiest way to do that is to get git to help you by explicitly using the template. To do that, `cd` to the root of our repository and run:\n\n    git config commit.template dev/git.commit.template\n\n### Run CI checks locally\n\nYou can run the GitHub Actions workflows locally using [act](https://github.com/nektos/act). For detailed steps on how to do this please see [https://github.com/swiftlang/github-workflows?tab=readme-ov-file#running-workflows-locally](https://github.com/swiftlang/github-workflows?tab=readme-ov-file#running-workflows-locally).\n\n## How to contribute your work\n\nPlease open a pull request at https://github.com/apple/swift-cluster-membership. Make sure the CI passes, and then wait for code review.\n"
  },
  {
    "path": "CONTRIBUTORS.txt",
    "content": "For the purpose of tracking copyright, this is the list of individuals and\norganizations who have contributed source code to Swift Cluster Membership.\n\nFor employees of an organization/company where the copyright of work done\nby employees of that company is held by the company itself, only the company\nneeds to be listed here.\n\n## COPYRIGHT HOLDERS\n\n- Apple Inc. (all contributors with '@apple.com')\n\n### Contributors\n\n- Konrad `ktoso` Malawski <ktoso@apple.com> <konrad_malawski@apple.com>\n- Dario Rexin <drexin@apple.com>\n- Anton Volokhov <avolokhov@apple.com>\n- Tom Doron <tomer@apple.com>\n"
  },
  {
    "path": "HANDBOOK.md",
    "content": "# Contributors Handbook\n\n## Glossary\n\nIn an attempt to form a shared vocabulary in the project, words used in the APIs have been carefully selected and we'd like to keep using them to refer to the same things in various implementations.\n\n- CoolAlgorithm **Instance** - we refer to a specific implementation of a membership algorithm as an \"Instance\". An instance is:\n    - SHOULD be a value type; as it makes debugging the state of an instance simpler; i.e. we can record the last 10 states of the algorithm and see how things went wrong, even in running clustered systems if necessary.\n    - is NOT performing any IO (except for logging which we after long debates, decided that it's better to allow implementations to log if configured to do so)\n    - it most likely IS a finite state machine, although may not be one in the strict meaning of the pattern. If possible to express as an FSM, we recommend doing so, or carving out pieces of the protocol which can be represented as a state machine.\n    - an instance SHOULD be easy to test in isolation, such that crazy edge cases in algorithms can be codified in easy to run and understand test cases when necessary\n- CoolAlgorithm **Shell** - inspired by shells as we know them from terminals, a shell is what handles the interaction with the environment and the instance; it is the link between the I/O and the pure instance. One can also think of it as an \"interpreter.\"\n- **Directive** - directives are how an algorithm instance may choose to interact with a Shell. Upon performing some action on an algorithm's instance it SHOULD return a directive, which will instruct the Shell to \"now do this thing, then that thing\". 
It \"directs the shell\" to do the right thing, following the instance's protocol.\n- **Peer** - a peer is a known host that has the potential to be a cluster member; we can communicate with a peer by sending messages to it (and it may send messages to us), however a peer does not have an inherent cluster membership status, in order to have a status it must be (wrapped in a) *Member*\n    - Peers SHOULD be Unique; meaning that if a node dies and spawns again using the same host/port pair, we should consider it to be a _new peer_ rather than the same peer. This is usually solved by issuing some random UUID on node startup, and including this ID in any messaging the peer performs. \n- **Cluster Member** - a member of a cluster, meaning it is _known to be (or have been) part of the cluster_ and likely has some associated cluster state (e.g. alive or dead etc.)\n    - It most likely is wrapping a Peer with additional information\n\n## Tips\n\n- When working with directives, never `return []`, always preallocate a directives array and `return directives`.\n  - there are many situations where it is good to bail out early, but many operations have some form of \"needs to always be done\"\n    in their directives. Using this pattern ensures you won't accidentally miss those directives. \n- When \"should never happen\", use `precondition`s with a lot of contextual information (including the entire instance state), \n  so users can provide you with a good crash report.\n\n## Testing tips\n\nTests that have `LogCapture` installed are able to capture all logs \"per node\" and automatically present them in a readable output if a test fails.\n\nIf you need to investigate test logs without the test failing, you can enable them like so:\n\n```swift\nfinal class SWIMNIOClusteredTests: RealClusteredXCTestCase {\n\n    override var alwaysPrintCaptureLogs: Bool {\n        true\n    }\n    \n    // ... \n\n}\n```\n"
  },
  {
    "path": "LICENSE.txt",
    "content": "\n                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "NOTICE.txt",
    "content": "\n                            The Swift Cluster Membership Project\n                            ====================================\n\nPlease visit the Swift Cluster Membership web site for more information:\n\n  * https://github.com/apple/swift-cluster-membership\n\nCopyright 2020 The Swift Cluster Membership Project\n\nThe Swift Cluster Membership Project licenses this file to you under the Apache License,\nversion 2.0 (the \"License\"); you may not use this file except in compliance\nwith the License. You may obtain a copy of the License at:\n\n  https://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\nWARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\nLicense for the specific language governing permissions and limitations\nunder the License.\n\nAlso, please refer to each LICENSE.<component>.txt file, which is located in\nthe 'license' directory of the distribution file, for the license terms of the\ncomponents that this product depends on.\n\n---\n\nThis product contains some modified data-structures from Apple/Swift-NIO.\n\n  * LICENSE (Apache 2.0):\n    * https://github.com/apple/swift-nio/blob/main/LICENSE.txt\n  * HOMEPAGE:\n    * https://github.com/apple/swift-nio\n\n"
  },
  {
    "path": "Package.swift",
    "content": "// swift-tools-version:5.10\n// The swift-tools-version declares the minimum version of Swift required to build this package.\n\nimport PackageDescription\n\nimport class Foundation.ProcessInfo\n\n// Workaround: Since we cannot include the flat just as command line options since then it applies to all targets,\n// and ONE of our dependencies currently produces one warning, we have to use this workaround to enable it in _our_\n// targets when the flag is set. We should remove the dependencies and then enable the flag globally though just by passing it.\nlet globalSwiftSettings: [SwiftSetting]\nif ProcessInfo.processInfo.environment[\"WARNINGS_AS_ERRORS\"] != nil {\n    print(\"WARNINGS_AS_ERRORS enabled, passing `-warnings-as-errors`\")\n    globalSwiftSettings = [\n        SwiftSetting.unsafeFlags([\"-warnings-as-errors\"])\n    ]\n} else {\n    globalSwiftSettings = []\n}\n\nvar targets: [PackageDescription.Target] = [\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: SWIM\n\n    .target(\n        name: \"ClusterMembership\",\n        dependencies: []\n    ),\n\n    .target(\n        name: \"SWIM\",\n        dependencies: [\n            \"ClusterMembership\",\n            .product(name: \"Logging\", package: \"swift-log\"),\n            .product(name: \"Metrics\", package: \"swift-metrics\"),\n        ]\n    ),\n\n    .target(\n        name: \"SWIMNIOExample\",\n        dependencies: [\n            \"SWIM\",\n            .product(name: \"NIO\", package: \"swift-nio\"),\n            .product(name: \"NIOFoundationCompat\", package: \"swift-nio\"),\n            .product(name: \"NIOConcurrencyHelpers\", package: \"swift-nio\"),\n            .product(name: \"NIOExtras\", package: \"swift-nio-extras\"),\n\n            .product(name: \"Logging\", package: \"swift-log\"),\n            .product(name: \"Metrics\", package: \"swift-metrics\"),\n        ]\n    ),\n\n    // 
==== ------------------------------------------------------------------------------------------------------------\n    // MARK: Other Membership Protocols ...\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: Documentation\n\n    .testTarget(\n        name: \"ClusterMembershipDocumentationTests\",\n        dependencies: [\n            \"SWIM\"\n        ]\n    ),\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: Tests\n\n    .testTarget(\n        name: \"ClusterMembershipTests\",\n        dependencies: [\n            \"ClusterMembership\"\n        ]\n    ),\n\n    .testTarget(\n        name: \"SWIMTests\",\n        dependencies: [\n            \"SWIM\",\n            \"SWIMTestKit\",\n        ]\n    ),\n\n    .testTarget(\n        name: \"SWIMNIOExampleTests\",\n        dependencies: [\n            \"SWIMNIOExample\",\n            \"SWIMTestKit\",\n        ]\n    ),\n\n    // NOT FOR PUBLIC CONSUMPTION.\n    .testTarget(\n        name: \"SWIMTestKit\",\n        dependencies: [\n            \"SWIM\",\n            .product(name: \"NIO\", package: \"swift-nio\"),\n            .product(name: \"Logging\", package: \"swift-log\"),\n            .product(name: \"Metrics\", package: \"swift-metrics\"),\n        ]\n    ),\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: Samples are defined in Samples/Package.swift\n    // ==== ------------------------------------------------------------------------------------------------------------\n]\n\nvar dependencies: [Package.Dependency] = [\n    .package(url: \"https://github.com/apple/swift-nio.git\", from: \"2.19.0\"),\n    .package(url: \"https://github.com/apple/swift-nio-ssl.git\", from: \"2.8.0\"),\n    .package(url: \"https://github.com/apple/swift-nio-extras.git\", 
from: \"1.5.1\"),\n\n    // ~~~ SSWG APIs ~~~\n    .package(url: \"https://github.com/apple/swift-log.git\", from: \"1.4.0\"),\n    .package(url: \"https://github.com/apple/swift-metrics.git\", \"2.3.2\"..<\"3.0.0\"),  // since latest\n\n]\n\nlet products: [PackageDescription.Product] = [\n    .library(\n        name: \"ClusterMembership\",\n        targets: [\"ClusterMembership\"]\n    ),\n    .library(\n        name: \"SWIM\",\n        targets: [\"SWIM\"]\n    ),\n    .library(\n        name: \"SWIMNIOExample\",\n        targets: [\"SWIMNIOExample\"]\n    ),\n]\n\nvar package = Package(\n    name: \"swift-cluster-membership\",\n    platforms: [\n        .macOS(.v13),\n        .iOS(.v16),\n        .tvOS(.v16),\n        .watchOS(.v9),\n    ],\n    products: products,\n\n    dependencies: dependencies,\n\n    targets: targets.map { target in\n        var swiftSettings = target.swiftSettings ?? []\n        swiftSettings.append(contentsOf: globalSwiftSettings)\n        if !swiftSettings.isEmpty {\n            target.swiftSettings = swiftSettings\n        }\n        return target\n    },\n\n    cxxLanguageStandard: .cxx11\n)\n\n// ---    STANDARD CROSS-REPO SETTINGS DO NOT EDIT   --- //\nfor target in package.targets {\n    switch target.type {\n    case .regular, .test, .executable:\n        var settings = target.swiftSettings ?? []\n        // https://github.com/swiftlang/swift-evolution/blob/main/proposals/0444-member-import-visibility.md\n        settings.append(.enableUpcomingFeature(\"MemberImportVisibility\"))\n        target.swiftSettings = settings\n    case .macro, .plugin, .system, .binary:\n        ()  // not applicable\n    @unknown default:\n        ()  // we don't know what to do here, do nothing\n    }\n}\n// --- END: STANDARD CROSS-REPO SETTINGS DO NOT EDIT --- //\n"
  },
  {
    "path": "README.md",
    "content": "# Swift Cluster Membership\n\nThis library aims to help Swift make ground in a new space: clustered multi-node distributed systems. \n\nWith this library we provide reusable runtime agnostic membership protocol implementations which can be adopted in various clustering use-cases.\n\n## Background\n\nCluster membership protocols are a crucial building block for distributed systems, such as computation intensive clusters, schedulers, databases, key-value stores and more. With the announcement of this package, we aim to make building such systems simpler, as they no longer need to rely on external services to handle service membership for them. We would also like to invite the community to collaborate on and develop additional membership protocols.\n\nAt their core, membership protocols need to provide an answer for the question \"Who are my (live) peers?\". This seemingly simple task turns out to be not so simple at all in a distributed system where delayed or lost messages, network partitions, and unresponsive but still \"alive\" nodes are the daily bread and butter. Providing a predictable, reliable answer to this question is what cluster membership protocols do.\n\nThere are various trade-offs one can take while implementing a membership protocol, and it continues to be an interesting area of research and continued refinement. 
As such, the cluster-membership package intends to focus not on a single implementation, but serve as a collaboration space for various distributed algorithms in this space.\n\n## 🏊🏾‍♀️🏊🏻‍♀️🏊🏾‍♂️🏊🏼‍♂️ SWIMming with Swift\n\n### High-level Protocol Description\n\n> For a more in-depth discussion of the protocol and modifications in this implementation we suggest reading the [SWIM API Documentation](https://apple.github.io/swift-cluster-membership/docs/current/SWIM/Enums/SWIM.html), as well as the associated papers linked below.\n\nThe [*Scalable Weakly-consistent Infection-style process group Membership*](https://research.cs.cornell.edu/projects/Quicksilver/public_pdfs/SWIM.pdf) algorithm (also known as \"SWIM\"), along with a few notable protocol extensions as documented in the 2018 [*Lifeguard: Local Health Awareness for More Accurate Failure Detection*](https://arxiv.org/abs/1707.00788) paper.\n\nSWIM is a [gossip protocol](https://en.wikipedia.org/wiki/Gossip_protocol) in which peers periodically exchange bits of information about their observations of other nodes’ statuses, eventually spreading the information to all other members in a cluster. This category of distributed algorithms are very resilient against arbitrary message loss, network partitions and similar issues.\n\nAt a high level, SWIM works like this: \n\n* A member periodically pings a \"randomly\" selected peer it is aware of. It does so by sending a .ping message to that peer, expecting an [`.ack`](https://apple.github.io/swift-cluster-membership/docs/current/SWIM/Protocols/SWIMPingOriginPeer.html#/s:4SWIM18SWIMPingOriginPeerP3ack13acknowledging6target11incarnation7payloadys6UInt32V_AA8SWIMPeer_ps6UInt64VA2AO13GossipPayloadOtF) to be sent back. 
See how `A` probes `B` initially in the diagram below.\n    * The exchanged messages also carry a gossip `payload`, which is (partial) information about what other peers the sender of the message is aware of, along with their membership status (`.alive`, `.suspect`, etc.)\n* If it receives an `.ack`, the peer is considered still `.alive`. Otherwise, the target peer might have terminated/crashed or is unresponsive for other reasons. \n    * In order to double check if the peer really is dead, the origin asks a few other peers about the state of the unresponsive peer by sending `.pingRequest` messages to a configured number of other peers, which then issue direct pings to that peer (probing peer E in the diagram below).\n* If those pings fail, due to lack of .acks resulting in the peer being marked as `.suspect`,\n    * Our protocol implementation will also use additional `.nack` (\"negative acknowledgement\") messages in the situation to inform the ping request origin that the intermediary did receive those `.pingRequest` messages, however the target seems to not have responded. We use this information to adjust a Local Health Multiplier, which affects how timeouts are calculated. To learn more about this refer to the API docs and the Lifeguard paper.\n\n![SWIM: Messages Examples](Sources/SWIM/Docs.docc/images/ping_pingreq_cycle.svg)\n\nThe above mechanism, serves not only as a failure detection mechanism, but also as a gossip mechanism, which carries information about known members of the cluster. This way members eventually learn about the status of their peers, even without having them all listed upfront. It is worth pointing out however that this membership view is [weakly-consistent](https://en.wikipedia.org/wiki/Weak_consistency), which means there is no guarantee (or way to know, without additional information) if all members have the same exact view on the membership at any given point in time. 
However, it is an excellent building block for higher-level tools and systems to build their stronger guarantees on top.\n\nOnce the failure detection mechanism detects an unresponsive node, it eventually is marked as  .dead resulting in its irrevocable removal from the cluster. Our implementation offers an optional extension, adding an .unreachable state to the possible states, however most users will not find it necessary and it is disabled by default. For details and rules about legal status transitions refer to [SWIM.Status](https://github.com/apple/swift-cluster-membership/blob/main/Sources/SWIM/Status.swift#L18-L39) or the following diagram:\n\n![SWIM: Lifecycle Diagram](Sources/SWIM/Docs.docc/images/swim_lifecycle.svg)\n\nThe way Swift Cluster Membership implements protocols, is by offering \"`Instances`\" of them. For example, the SWIM implementation is encapsulated in the runtime agnostic [`SWIM.Instance`](https://github.com/apple/swift-cluster-membership/blob/main/Sources/SWIM/SWIMInstance.swift) which needs to be “driven” or “interpreted” by some glue code between a networking runtime and the instance itself. We call those glue pieces of an implementation \"`Shell`s\", and the library ships with a `SWIMNIOShell` implemented using [SwiftNIO](https://www.github.com/apple/swift-nio)’s `DatagramChannel` that performs all messaging asynchronously over [UDP](https://searchnetworking.techtarget.com/definition/UDP-User-Datagram-Protocol). 
Alternative implementations can use completely different transports, or piggy back SWIM messages on some other existing gossip system etc.\n\nThe SWIM instance also has built-in support for emitting metrics (using [swift-metrics](https://github.com/apple/swift-metrics)) and can be configured to log details about internal details by passing a [swift-log](https://github.com/apple/swift-log) `Logger`.\n\n### Example: Reusing the SWIM protocol logic implementation\n\nThe primary purpose of this library is to share the `SWIM.Instance` implementation across various implementations which need some form of in-process membership service. Implementing a custom runtime is documented in depth in the project’s README (https://github.com/apple/swift-cluster-membership/), so please have a look there if you are interested in implementing SWIM over some different transport.\n\nImplementing a new transport boils down a “fill in the blanks” exercise: \n\nFirst, one has to implement the Peer protocols (https://github.com/apple/swift-cluster-membership/blob/main/Sources/SWIM/Peer.swift) using one’s target transport:\n\n```swift\n/// SWIM peer which can be initiated contact with, by sending ping or ping request messages.\npublic protocol SWIMPeer: SWIMAddressablePeer {\n    /// Perform a probe of this peer by sending a `ping` message.\n    /// \n    /// <... more docs here - please refer to the API docs for the latest version ...>\n    func ping(\n        payload: SWIM.GossipPayload,\n        from origin: SWIMPingOriginPeer,\n        timeout: DispatchTimeInterval,\n        sequenceNumber: SWIM.SequenceNumber\n    ) async throws -> SWIM.PingResponse\n    \n    // ... \n}\n```\n\nWhich usually means wrapping some connection, channel, or other identity with the ability to send messages and invoke the appropriate callbacks when applicable. 
\n\nThen, on the receiving end of a peer, one has to implement receiving those messages and invoke all the corresponding `on<SomeMessage>(...)` callbacks defined on the `SWIM.Instance` (grouped under [SWIMProtocol](https://github.com/apple/swift-cluster-membership/blob/main/Sources/SWIM/SWIMInstance.swift#L24-L85)).\n\nA piece of the SWIMProtocol is listed below to give you an idea about it:\n\n\n```swift\npublic protocol SWIMProtocol {\n\n    /// MUST be invoked periodically, in intervals of `self.swim.dynamicLHMProtocolInterval`.\n    ///\n    /// MUST NOT be scheduled using a \"repeated\" task/timer\", as the interval is dynamic and may change as the algorithm proceeds.\n    /// Implementations should schedule each next tick by handling the returned directive's `scheduleNextTick` case,\n    /// which includes the appropriate delay to use for the next protocol tick.\n    ///\n    /// This is the heart of the protocol, as each tick corresponds to a \"protocol period\" in which:\n    /// - suspect members are checked if they're overdue and should become `.unreachable` or `.dead`,\n    /// - decisions are made to `.ping` a random peer for fault detection,\n    /// - and some internal house keeping is performed.\n    ///\n    /// Note: This means that effectively all decisions are made in intervals of protocol periods.\n    /// It would be possible to have a secondary periodic or more ad-hoc interval to speed up\n    /// some operations, however this is currently not implemented and the protocol follows the fairly\n    /// standard mode of simply carrying payloads in periodic ping messages.\n    ///\n    /// - Returns: `SWIM.Instance.PeriodicPingTickDirective` which must be interpreted by a shell implementation\n    mutating func onPeriodicPingTick() -> [SWIM.Instance.PeriodicPingTickDirective]\n\n    mutating func onPing( ... ) -> [SWIM.Instance.PingDirective]\n\n    mutating func onPingRequest( ... 
) -> [SWIM.Instance.PingRequestDirective]\n\n    mutating func onPingResponse( ... ) -> [SWIM.Instance.PingResponseDirective]\n\n    // ... \n}\n```\n\nThese calls perform all SWIM protocol specific tasks internally, and return directives which are simple to interpret “commands” to an implementation about how it should react to the message. For example, upon receiving a `.pingRequest` message, the returned directive may instruct a shell to send a ping to some nodes. The directive prepares all appropriate target, timeout and additional information that makes it simpler to simply follow its instruction and implement the call correctly, e.g. like this:\n\n```swift\nself.swim.onPingRequest(\n    target: target,\n    pingRequestOrigin: pingRequestOrigin,            \n    payload: payload,\n    sequenceNumber: sequenceNumber\n).forEach { directive in\n    switch directive {\n    case .gossipProcessed(let gossipDirective):\n        self.handleGossipPayloadProcessedDirective(gossipDirective)\n\n    case .sendPing(let target, let payload, let pingRequestOriginPeer, let pingRequestSequenceNumber, let timeout, let sequenceNumber):\n        self.sendPing(\n            to: target,\n            payload: payload,\n            pingRequestOrigin: pingRequestOriginPeer,\n            pingRequestSequenceNumber: pingRequestSequenceNumber,\n            timeout: timeout,\n            sequenceNumber: sequenceNumber\n        )\n    }\n}\n```\n\nIn general this allows for all the tricky \"what to do when\" to be encapsulated within the protocol instance, and a Shell only has to follow instructions implementing them. 
The actual implementations will often need to perform some more involved concurrency and networking tasks, like awaiting for a sequence of responses, and handling them in a specific way etc, however the general outline of the protocol is orchestrated by the instance's directives.\n\nFor detailed documentation about each of the callbacks, when to invoke them, and how all this fits together, please refer to the [**API Documentation**](https://apple.github.io/swift-cluster-membership/docs/current/SWIM/index.html).\n\n### Example: SWIMming with Swift NIO\n\nThe repository contains an [end-to-end example](Samples/Sources/SWIMNIOSampleCluster) and an example implementation called [SWIMNIOExample](Sources/SWIMNIOExample) which makes use of the `SWIM.Instance` to enable a simple UDP based peer monitoring system. This allows peers to gossip and notify each other about node failures using the SWIM protocol by sending datagrams driven by SwiftNIO.\n\n> 📘 The `SWIMNIOExample` implementation is offered only as an example, and has not been implemented with production use in mind, however with some amount of effort it could definitely do well for some use-cases. 
If you are interested in learning more about cluster membership algorithms, scalability benchmarking and using SwiftNIO itself, this is a great module to get your feet wet, and perhaps once the module is mature enough we could consider making it not only an example, but a reusable component for Swift NIO based clustered applications.\n\nIn its simplest form, combining the provided SWIM instance and NIO shell to build a simple server, one can embed the provided handlers like shown below, in a typical NIO channel pipeline:\n\n```swift\nlet bootstrap = DatagramBootstrap(group: group)\n    .channelOption(ChannelOptions.socketOption(.so_reuseaddr), value: 1)\n    .channelInitializer { channel in\n        channel.pipeline\n            // first install the SWIM handler, which contains the SWIMNIOShell:\n            .addHandler(SWIMNIOHandler(settings: settings)).flatMap {\n                // then install some user handler, it will receive SWIM events:\n                channel.pipeline.addHandler(SWIMNIOExampleHandler())\n        }\n    }\n\nbootstrap.bind(host: host, port: port)\n```\n\nThe example handler can then receive and handle SWIM cluster membership change events:\n\n```swift\nfinal class SWIMNIOExampleHandler: ChannelInboundHandler {\n    public typealias InboundIn = SWIM.MemberStatusChangedEvent\n    \n    let log = Logger(label: \"SWIMNIOExampleHandler\")\n    \n    public func channelRead(context: ChannelHandlerContext, data: NIOAny) {\n        let change: SWIM.MemberStatusChangedEvent = self.unwrapInboundIn(data)\n\n        self.log.info(\"Membership status changed: [\\(change.member.node)] is now [\\(change.status)]\", metadata: [    \n            \"swim/member\": \"\\(change.member.node)\",\n            \"swim/member/status\": \"\\(change.status)\",\n        ])\n    }\n}\n```\n\nIf you are interested in contributing and polishing up the SWIMNIO implementation please head over to the issues and pick up a task or propose an improvement yourself!\n\n## 
Additional Membership Protocol Implementations\n\nWe are generally interested in fostering discussions and implementations of additional membership implementations using a similar \"Instance\" style.\n\nIf you are interested in such algorithms, and have a favourite protocol that you'd like to see implemented, please do not hesitate to reach out here via issues or the [Swift forums](https://forums.swift.org/c/server).\n\n## Contributing\n\nExperience reports, feedback, improvement ideas and contributions are greatly encouraged! \nWe look forward to hearing from you.\n\nPlease refer to the [CONTRIBUTING](CONTRIBUTING.md) guide to learn about the process of submitting pull requests,\nand refer to the [HANDBOOK](HANDBOOK.md) for terminology and other useful tips for working with this library.\n\n\n"
  },
  {
    "path": "Samples/.gitignore",
    "content": "# The XPC sample generates an .app, so we want to ignore it\n*.app\n"
  },
  {
    "path": "Samples/Package.swift",
    "content": "// swift-tools-version:5.10\n// The swift-tools-version declares the minimum version of Swift required to build this package.\n\nimport PackageDescription\n\nvar targets: [PackageDescription.Target] = [\n    .target(\n        name: \"SWIMNIOSampleCluster\",\n        dependencies: [\n            .product(name: \"SWIM\", package: \"swift-cluster-membership\"),\n            .product(name: \"SWIMNIOExample\", package: \"swift-cluster-membership\"),\n            .product(name: \"SwiftPrometheus\", package: \"SwiftPrometheus\"),\n            .product(name: \"Lifecycle\", package: \"swift-service-lifecycle\"),\n            .product(name: \"ArgumentParser\", package: \"swift-argument-parser\"),\n        ],\n        path: \"Sources/SWIMNIOSampleCluster\"\n    ),\n\n    /* --- tests --- */\n\n    // no-tests placeholder project to not have `swift test` fail on Samples/\n    .testTarget(\n        name: \"NoopTests\",\n        dependencies: [\n            .product(name: \"SWIM\", package: \"swift-cluster-membership\")\n        ],\n        path: \"Tests/NoopTests\"\n    ),\n]\n\nvar dependencies: [Package.Dependency] = [\n    // ~~~~~~~     parent       ~~~~~~~\n    .package(path: \"../\"),\n\n    // ~~~~~~~ only for samples ~~~~~~~\n\n    .package(url: \"https://github.com/swift-server/swift-service-lifecycle.git\", from: \"1.0.0-alpha\"),\n    .package(url: \"https://github.com/MrLotU/SwiftPrometheus.git\", from: \"1.0.0-alpha\"),\n    .package(url: \"https://github.com/apple/swift-argument-parser\", from: \"0.2.0\"),\n]\n\nlet package = Package(\n    name: \"swift-cluster-membership-samples\",\n    platforms: [\n        .macOS(.v13)\n    ],\n    products: [\n        .executable(\n            name: \"SWIMNIOSampleCluster\",\n            targets: [\"SWIMNIOSampleCluster\"]\n        )\n\n    ],\n\n    dependencies: dependencies,\n\n    targets: targets,\n\n    cxxLanguageStandard: .cxx11\n)\n"
  },
  {
    "path": "Samples/README.md",
    "content": "## Sample applications\n\nUse `swift run` to run the samples.\n\n### SWIMNIOSampleCluster\n\nThis sample app runs a _single node_ per process, however it is prepared to be easily clustered up. \nThis mode of operation is useful to manually suspend or stop processes and see those issues be picked up by the SWIM implementation.\n\nRecommended way to run:\n\n```bash\n# cd swift-cluster-membership\n> swift run --package-path Samples SWIMNIOSampleCluster --help\n```\n\nwhich uses [swift argument parser](https://github.com/apple/swift-argument-parser) list all the options the sample app has available.\n\nAn example invocation would be:\n\n```bash\nswift run --package-path Samples SWIMNIOSampleCluster --port 7001\n```\n\nwhich spawns a node on `127.0.0.1:7001`, to spawn another node to join it and form a two node cluster you can:\n\n```bash\nswift run --package-path Samples SWIMNIOSampleCluster --port 7002 --initial-contact-points 127.0.0.1:7001,127.0.0.1:7003\n\n# you can list multiple peers as contact points like this:\nswift run --package-path Samples SWIMNIOSampleCluster --port 7003 --initial-contact-points 127.0.0.1:7001,127.0.0.1:7002\n``` \n\nOnce the cluster is formed, you'll see messages logged by the `SWIMNIOSampleHandler` showing when nodes become alive or dead.\n\nYou can enable debug or trace level logging to inspect more of the details of what is going on internally in the nodes.\n\nTo see the failure detection in action, you can stop processes, or \"suspend\" them for a little while by doing \n\n```bash\n# swift run --package-path Samples SWIMNIOSampleCluster --port 7001\n<ctrl +z>\n^Z\n[1]  + 35002 suspended  swift run --package-path Samples SWIMNIOSampleCluster --port 7001 \n$ fg %1 \n```\n\nto resume the node; This can be useful to poke around and manually get a feel about how the failure detection works.\nYou can also hook the systems up to a metrics dashboard to see the information propagate in real time (once it is instrumented using 
swift-metrics).\n"
  },
  {
    "path": "Samples/Sources/SWIMNIOSampleCluster/SWIMNIOSampleNode.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport ClusterMembership\nimport Logging\nimport NIO\nimport SWIM\nimport SWIMNIOExample\n\nstruct SampleSWIMNIONode {\n    let port: Int\n    var settings: SWIMNIO.Settings\n\n    let group: EventLoopGroup\n\n    init(port: Int, settings: SWIMNIO.Settings, group: EventLoopGroup) {\n        self.port = port\n        self.settings = settings\n        self.group = group\n    }\n\n    func start() {\n        let bootstrap = DatagramBootstrap(group: group)\n            .channelOption(ChannelOptions.socketOption(.so_reuseaddr), value: 1)\n            .channelInitializer { channel in\n                channel.pipeline\n                    .addHandler(SWIMNIOHandler(settings: self.settings)).flatMap {\n                        channel.pipeline.addHandler(SWIMNIOSampleHandler())\n                    }\n            }\n\n        bootstrap.bind(host: \"127.0.0.1\", port: port).whenComplete { result in\n            switch result {\n            case .success(let res):\n                self.settings.logger.info(\"Bound to: \\(res)\")\n                ()\n            case .failure(let error):\n                self.settings.logger.error(\"Error: \\(error)\")\n                ()\n            }\n        }\n    }\n\n}\n\nfinal class SWIMNIOSampleHandler: ChannelInboundHandler {\n    typealias InboundIn = SWIM.MemberStatusChangedEvent<SWIM.NIOPeer>\n\n    let log = Logger(label: \"SWIMNIOSample\")\n\n    public func 
channelRead(context: ChannelHandlerContext, data: NIOAny) {\n        let change: SWIM.MemberStatusChangedEvent = self.unwrapInboundIn(data)\n\n        // we log each event (in a pretty way)\n        self.log.info(\n            \"Membership status changed: [\\(change.member.node)] is now [\\(change.status)]\",\n            metadata: [\n                \"swim/member\": \"\\(change.member.node)\",\n                \"swim/member/previousStatus\": \"\\(change.previousStatus.map({\"\\($0)\"}) ?? \"unknown\")\",\n                \"swim/member/status\": \"\\(change.status)\",\n            ]\n        )\n    }\n}\n"
  },
  {
    "path": "Samples/Sources/SWIMNIOSampleCluster/main.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport ArgumentParser\nimport ClusterMembership\nimport Lifecycle\nimport Logging\nimport Metrics\nimport NIO\nimport Prometheus\nimport SWIM\nimport SWIMNIOExample\n\nstruct SWIMNIOSampleCluster: ParsableCommand {\n    @Option(name: .shortAndLong, help: \"The number of nodes to start, defaults to: 1\")\n    var count: Int?\n\n    @Argument(help: \"Hostname that node(s) should bind to\")\n    var host: String?\n\n    @Option(help: \"Determines which this node should bind to; Only effective when running a single node\")\n    var port: Int?\n\n    @Option(help: \"Configures which nodes should be passed in as initial contact points, format: host:port,\")\n    var initialContactPoints: String = \"\"\n\n    @Option(help: \"Configures log level\")\n    var logLevel: String = \"info\"\n\n    mutating func run() throws {\n        LoggingSystem.bootstrap(_SWIMPrettyMetadataLogHandler.init)\n        let group = MultiThreadedEventLoopGroup(numberOfThreads: System.coreCount)\n\n        // Uncomment this if you'd like to see metrics displayed in the command line periodically;\n        // This bootstraps and uses the Prometheus metrics backend to report metrics periodically by printing them to the stdout (console).\n        //\n        // Note though that this will be a bit noisy, since logs are also emitted to the stdout by default, however it's a nice way\n        // to learn and explore what the metrics are and how 
they behave when toying around with a local cluster.\n        //        let prom = PrometheusClient()\n        //        MetricsSystem.bootstrap(prom)\n        //\n        //        group.next().scheduleRepeatedTask(initialDelay: .seconds(1), delay: .seconds(10)) { _ in\n        //             prom.collect { (string: String) in\n        //                 print(\"\")\n        //                 print(\"\")\n        //                 print(string)\n        //             }\n        //        }\n\n        let lifecycle = ServiceLifecycle()\n        lifecycle.registerShutdown(\n            label: \"eventLoopGroup\",\n            .sync(group.syncShutdownGracefully)\n        )\n\n        var settings = SWIMNIO.Settings()\n        if count == nil || count == 1 {\n            let nodePort = self.port ?? 7001\n            settings.logger = Logger(label: \"swim-\\(nodePort)\")\n            settings.logger.logLevel = self.parseLogLevel()\n            settings.swim.logger.logLevel = self.parseLogLevel()\n\n            settings.swim.initialContactPoints = self.parseContactPoints()\n\n            let node = SampleSWIMNIONode(port: nodePort, settings: settings, group: group)\n            lifecycle.register(\n                label: \"swim-\\(nodePort)\",\n                start: .sync { node.start() },\n                shutdown: .sync {}\n            )\n\n        } else {\n            let basePort = port ?? 7001\n            for i in 1...(count ?? 
1) {\n                let nodePort = basePort + i\n\n                settings.logger = Logger(label: \"swim-\\(nodePort)\")\n                settings.swim.initialContactPoints = self.parseContactPoints()\n\n                let node = SampleSWIMNIONode(\n                    port: nodePort,\n                    settings: settings,\n                    group: group\n                )\n\n                lifecycle.register(\n                    label: \"swim\\(nodePort)\",\n                    start: .sync { node.start() },\n                    shutdown: .sync {}\n                )\n            }\n        }\n\n        try lifecycle.startAndWait()\n    }\n\n    private func parseLogLevel() -> Logger.Level {\n        guard let level = Logger.Level.init(rawValue: self.logLevel) else {\n            fatalError(\"Unknown log level: \\(self.logLevel)\")\n        }\n        return level\n    }\n\n    private func parseContactPoints() -> Set<ClusterMembership.Node> {\n        guard self.initialContactPoints.trimmingCharacters(in: .whitespacesAndNewlines) != \"\" else {\n            return []\n        }\n\n        let contactPoints: [Node] = self.initialContactPoints.split(separator: \",\").map { hostPort in\n            let host = String(hostPort.split(separator: \":\")[0])\n            let port = Int(String(hostPort.split(separator: \":\")[1]))!\n\n            return Node(protocol: \"udp\", host: host, port: port, uid: nil)\n        }\n\n        return Set(contactPoints)\n    }\n}\n\nSWIMNIOSampleCluster.main()\n"
  },
  {
    "path": "Samples/Tests/NoopTests/SampleTest.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2018-2019 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport SWIM\nimport XCTest\n\nfinal class SampleTest: XCTestCase {\n    func test_empty() {\n        // nothing here (so far...)\n    }\n}\n"
  },
  {
    "path": "Sources/ClusterMembership/Node.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\n/// Generic representation of a (potentially unique, if `uid` is present) node in a cluster.\n///\n/// Generally the node represents \"some node we want to contact\" if the `uid` is not set,\n/// and if the `uid` is available \"the specific instance of a node\".\npublic struct Node: Hashable, Sendable, Comparable, CustomStringConvertible {\n    /// Protocol that can be used to contact this node;\n    /// Does not have to be a formal protocol name and may be \"swim\" or a name which is understood by a membership implementation.\n    public var `protocol`: String\n    public var name: String?\n    public var host: String\n    public var port: Int\n\n    public internal(set) var uid: UInt64?\n\n    public init(protocol: String, host: String, port: Int, uid: UInt64?) {\n        self.protocol = `protocol`\n        self.name = nil\n        self.host = host\n        self.port = port\n        self.uid = uid\n    }\n\n    public init(protocol: String, name: String?, host: String, port: Int, uid: UInt64?) 
{\n        self.protocol = `protocol`\n        if let name = name, name.isEmpty {\n            self.name = nil\n        } else {\n            self.name = name\n        }\n        self.host = host\n        self.port = port\n        self.uid = uid\n    }\n\n    public var withoutUID: Self {\n        var without = self\n        without.uid = nil\n        return without\n    }\n\n    public var description: String {\n        // /// uid is not printed by default since we only care about it when we do, not in every place where we log a node\n        // \"\\(self.protocol)://\\(self.host):\\(self.port)\"\n        self.detailedDescription\n    }\n\n    /// Prints a node's String representation including its `uid`.\n    public var detailedDescription: String {\n        \"\\(self.protocol)://\\(self.name.map { \"\\($0)@\" } ?? \"\")\\(self.host):\\(self.port)\\(self.uid.map { \"#\\($0.description)\" } ?? \"\")\"\n    }\n}\n\nextension Node {\n    // Silly but good enough comparison for deciding \"who is lower node\"\n    // as we only use those for \"tie-breakers\" any ordering is fine to be honest here.\n    public static func < (lhs: Node, rhs: Node) -> Bool {\n        if lhs.protocol == rhs.protocol, lhs.host == rhs.host {\n            if lhs.port == rhs.port {\n                return (lhs.uid ?? 0) < (rhs.uid ?? 0)\n            } else {\n                return lhs.port < rhs.port\n            }\n        } else {\n            // \"silly\" but good enough comparison, we just need a predictable order, does not really matter what it is\n            return \"\\(lhs.protocol)\\(lhs.host)\" < \"\\(rhs.protocol)\\(rhs.host)\"\n        }\n    }\n}\n"
  },
  {
    "path": "Sources/SWIM/Docs.docc/index.md",
    "content": "# ``SWIM``\n\nThis library aims to help Swift make ground in a new space: clustered multi-node distributed systems.\n\n## Overview\n\nWith this library we provide reusable runtime agnostic membership protocol implementations which can be adopted in various clustering use-cases.\n\n### Background\n\nCluster membership protocols are a crucial building block for distributed systems, such as computation intensive clusters, schedulers, databases, key-value stores and more. With the announcement of this package, we aim to make building such systems simpler, as they no longer need to rely on external services to handle service membership for them. We would also like to invite the community to collaborate on and develop additional membership protocols.\n\nAt their core, membership protocols need to provide an answer for the question \"Who are my (live) peers?\". This seemingly simple task turns out to be not so simple at all in a distributed system where delayed or lost messages, network partitions, and unresponsive but still \"alive\" nodes are the daily bread and butter. Providing a predictable, reliable answer to this question is what cluster membership protocols do.\n\nThere are various trade-offs one can take while implementing a membership protocol, and it continues to be an interesting area of research and continued refinement. 
As such, the cluster-membership package intends to focus not on a single implementation, but serve as a collaboration space for various distributed algorithms in this space.\n\n### 🏊🏾‍♀️🏊🏻‍♀️🏊🏾‍♂️🏊🏼‍♂️ SWIMming with Swift\n\n#### High-level Protocol Description\n\n> For a more in-depth discussion of the protocol and modifications in this implementation we suggest reading the [SWIM API Documentation](https://apple.github.io/swift-cluster-membership/docs/current/SWIM/Enums/SWIM.html), as well as the associated papers linked below.\n\nThe [*Scalable Weakly-consistent Infection-style process group Membership*](https://research.cs.cornell.edu/projects/Quicksilver/public_pdfs/SWIM.pdf) algorithm (also known as \"SWIM\"), along with a few notable protocol extensions as documented in the 2018 [*Lifeguard: Local Health Awareness for More Accurate Failure Detection*](https://arxiv.org/abs/1707.00788) paper.\n\nSWIM is a [gossip protocol](https://en.wikipedia.org/wiki/Gossip_protocol) in which peers periodically exchange bits of information about their observations of other nodes’ statuses, eventually spreading the information to all other members in a cluster. This category of distributed algorithms are very resilient against arbitrary message loss, network partitions and similar issues.\n\nAt a high level, SWIM works like this:\n\n* A member periodically pings a \"randomly\" selected peer it is aware of. It does so by sending a `.ping` message to that peer, expecting an [`.ack`](https://apple.github.io/swift-cluster-membership/docs/current/SWIM/Protocols/SWIMPingOriginPeer.html#/s:4SWIM18SWIMPingOriginPeerP3ack13acknowledging6target11incarnation7payloadys6UInt32V_AA8SWIMPeer_ps6UInt64VA2AO13GossipPayloadOtF) to be sent back. 
See how `A` probes `B` initially in the diagram below.\n    * The exchanged messages also carry a gossip `payload`, which is (partial) information about what other peers the sender of the message is aware of, along with their membership status (`.alive`, `.suspect`, etc.)\n* If it receives an `.ack`, the peer is considered still `.alive`. Otherwise, the target peer might have terminated/crashed or is unresponsive for other reasons.\n    * In order to double-check if the peer really is dead, the origin asks a few other peers about the state of the unresponsive peer by sending `.pingRequest` messages to a configured number of other peers, which then issue direct pings to that peer (probing peer E in the diagram below).\n* If those pings fail, due to lack of `.ack`s resulting in the peer being marked as `.suspect`,\n    * Our protocol implementation will also use additional `.nack` (\"negative acknowledgement\") messages in the situation to inform the ping request origin that the intermediary did receive those `.pingRequest` messages, however the target seems to not have responded. We use this information to adjust a Local Health Multiplier, which affects how timeouts are calculated. To learn more about this refer to the API docs and the Lifeguard paper.\n\n![SWIM: Messages Examples](ping_pingreq_cycle.png)\n\nThe above mechanism serves not only as a failure detection mechanism, but also as a gossip mechanism, which carries information about known members of the cluster. This way members eventually learn about the status of their peers, even without having them all listed upfront. It is worth pointing out however that this membership view is [weakly-consistent](https://en.wikipedia.org/wiki/Weak_consistency), which means there is no guarantee (or way to know, without additional information) if all members have the same exact view on the membership at any given point in time. 
However, it is an excellent building block for higher-level tools and systems to build their stronger guarantees on top.\n\nOnce the failure detection mechanism detects an unresponsive node, it eventually is marked as `.dead` resulting in its irrevocable removal from the cluster. Our implementation offers an optional extension, adding an `.unreachable` state to the possible states, however most users will not find it necessary and it is disabled by default. For details and rules about legal status transitions refer to [SWIM.Status](https://github.com/apple/swift-cluster-membership/blob/main/Sources/SWIM/Status.swift#L18-L39) or the following diagram:\n\n![SWIM: Lifecycle Diagram](swim_lifecycle.png)\n\nThe way Swift Cluster Membership implements protocols is by offering \"`Instances`\" of them. For example, the SWIM implementation is encapsulated in the runtime agnostic [`SWIM.Instance`](https://github.com/apple/swift-cluster-membership/blob/main/Sources/SWIM/SWIMInstance.swift) which needs to be “driven” or “interpreted” by some glue code between a networking runtime and the instance itself. We call those glue pieces of an implementation \"`Shell`s\", and the library ships with a `SWIMNIOShell` implemented using [SwiftNIO](https://www.github.com/apple/swift-nio)’s `DatagramChannel` that performs all messaging asynchronously over [UDP](https://searchnetworking.techtarget.com/definition/UDP-User-Datagram-Protocol). 
Alternative implementations can use completely different transports, or piggy back SWIM messages on some other existing gossip system etc.\n\nThe SWIM instance also has built-in support for emitting metrics (using [swift-metrics](https://github.com/apple/swift-metrics)) and can be configured to log details about internal details by passing a [swift-log](https://github.com/apple/swift-log) `Logger`.\n\n#### Example: Reusing the SWIM protocol logic implementation\n\nThe primary purpose of this library is to share the `SWIM.Instance` implementation across various implementations which need some form of in-process membership service. Implementing a custom runtime is documented in depth in the project’s README (https://github.com/apple/swift-cluster-membership/), so please have a look there if you are interested in implementing SWIM over some different transport.\n\nImplementing a new transport boils down a “fill in the blanks” exercise:\n\nFirst, one has to implement the Peer protocols (https://github.com/apple/swift-cluster-membership/blob/main/Sources/SWIM/Peer.swift) using one’s target transport:\n\n```swift\n/// SWIM peer which can be initiated contact with, by sending ping or ping request messages.\npublic protocol SWIMPeer: SWIMAddressablePeer {\n    /// Perform a probe of this peer by sending a `ping` message.\n    /// \n    /// <... more docs here - please refer to the API docs for the latest version ...>\n    func ping(\n        payload: SWIM.GossipPayload,\n        from origin: SWIMPingOriginPeer,\n        timeout: DispatchTimeInterval,\n        sequenceNumber: SWIM.SequenceNumber\n    ) async throws -> SWIM.PingResponse\n    \n    // ... 
\n}\n```\n\nWhich usually means wrapping some connection, channel, or other identity with the ability to send messages and invoke the appropriate callbacks when applicable.\n\nThen, on the receiving end of a peer, one has to implement receiving those messages and invoke all the corresponding \n`on<SomeMessage>(...)` callbacks defined on the ``SWIM/Instance`` (grouped under ``SWIMProtocol``).\n\nA piece of the ``SWIMProtocol`` is listed below to give you an idea about it:\n\n\n```swift\npublic protocol SWIMProtocol {\n\n    /// MUST be invoked periodically, in intervals of `self.swim.dynamicLHMProtocolInterval`.\n    ///\n    /// MUST NOT be scheduled using a \"repeated\" task/timer, as the interval is dynamic and may change as the algorithm proceeds.\n    /// Implementations should schedule each next tick by handling the returned directive's `scheduleNextTick` case,\n    /// which includes the appropriate delay to use for the next protocol tick.\n    ///\n    /// This is the heart of the protocol, as each tick corresponds to a \"protocol period\" in which:\n    /// - suspect members are checked if they're overdue and should become `.unreachable` or `.dead`,\n    /// - decisions are made to `.ping` a random peer for fault detection,\n    /// - and some internal house keeping is performed.\n    ///\n    /// Note: This means that effectively all decisions are made in intervals of protocol periods.\n    /// It would be possible to have a secondary periodic or more ad-hoc interval to speed up\n    /// some operations, however this is currently not implemented and the protocol follows the fairly\n    /// standard mode of simply carrying payloads in periodic ping messages.\n    ///\n    /// - Returns: `SWIM.Instance.PeriodicPingTickDirective` which must be interpreted by a shell implementation\n    mutating func onPeriodicPingTick() -> [SWIM.Instance.PeriodicPingTickDirective]\n\n    mutating func onPing( ... 
) -> [SWIM.Instance.PingDirective]\n\n    mutating func onPingRequest( ... ) -> [SWIM.Instance.PingRequestDirective]\n\n    mutating func onPingResponse( ... ) -> [SWIM.Instance.PingResponseDirective]\n\n    // ... \n}\n```\n\nThese calls perform all SWIM protocol specific tasks internally, and return directives which are simple to interpret “commands” to an implementation about how it should react to the message. For example, upon receiving a `.pingRequest` message, the returned directive may instruct a shell to send a ping to some nodes. The directive prepares all appropriate target, timeout and additional information that makes it simpler to simply follow its instruction and implement the call correctly, e.g. like this:\n\n```swift\nself.swim.onPingRequest(\n    target: target,\n    pingRequestOrigin: pingRequestOrigin,            \n    payload: payload,\n    sequenceNumber: sequenceNumber\n).forEach { directive in\n    switch directive {\n    case .gossipProcessed(let gossipDirective):\n        self.handleGossipPayloadProcessedDirective(gossipDirective)\n\n    case .sendPing(let target, let payload, let pingRequestOriginPeer, let pingRequestSequenceNumber, let timeout, let sequenceNumber):\n        self.sendPing(\n            to: target,\n            payload: payload,\n            pingRequestOrigin: pingRequestOriginPeer,\n            pingRequestSequenceNumber: pingRequestSequenceNumber,\n            timeout: timeout,\n            sequenceNumber: sequenceNumber\n        )\n    }\n}\n```\n\nIn general this allows for all the tricky \"what to do when\" to be encapsulated within the protocol instance, and a Shell only has to follow instructions implementing them. 
The actual implementations will often need to perform some more involved concurrency and networking tasks, like awaiting a sequence of responses, and handling them in a specific way etc, however the general outline of the protocol is orchestrated by the instance's directives.\n\n## Topics\n\n### SWIM logic implementation\n\n- ``SWIM/Instance``\n- ``SWIM/Member``\n\n### SWIM settings\n\n- ``SWIMGossipSettings``\n- ``SWIMLifeguardSettings``\n- ``SWIMMetricsSettings``\n\n### Protocols peer implementations must conform to \n\n- ``SWIMPeer``\n- ``SWIMAddressablePeer`` \n- ``SWIMPingOriginPeer`` \n- ``SWIMPingRequestOriginPeer`` \n\n### Namespace\n\n- ``SWIM/SWIM``\n"
  },
  {
    "path": "Sources/SWIM/Events.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport ClusterMembership\n\nextension SWIM {\n    /// Emitted whenever a membership change happens.\n    ///\n    /// Use `isReachabilityChange` to detect whether the is a change from an alive to unreachable/dead state or not,\n    /// and is worth emitting to user-code or not.\n    public struct MemberStatusChangedEvent<Peer: SWIMPeer>: Equatable {\n        /// The member that this change event is about.\n        public let member: SWIM.Member<Peer>\n\n        /// The resulting (\"current\") status of the `member`.\n        public var status: SWIM.Status {\n            // Note if the member is marked .dead, SWIM shall continue to gossip about it for a while\n            // such that other nodes gain this information directly, and do not have to wait until they detect\n            // it as such independently.\n            self.member.status\n        }\n\n        /// Previous status of the member, needed in order to decide if the change is \"effective\" or if applying the\n        /// member did not move it in such way that we need to inform the cluster about unreachability.\n        public let previousStatus: SWIM.Status?\n\n        /// Create new event, representing a change of the member's status from a previous state to its current state.\n        public init(previousStatus: SWIM.Status?, member: SWIM.Member<Peer>) {\n            if let from = previousStatus, from == .dead {\n                
precondition(\n                    member.status == .dead,\n                    \"Change MUST NOT move status 'backwards' from [.dead] state to anything else, but did so, was: \\(member)\"\n                )\n            }\n\n            self.previousStatus = previousStatus\n            self.member = member\n\n            switch (self.previousStatus, member.status) {\n            case (.dead, .alive),\n                (.dead, .suspect),\n                (.dead, .unreachable):\n                fatalError(\n                    \"SWIM.Membership MUST NOT move status 'backwards' from .dead state to anything else, but did so, was: \\(self)\"\n                )\n            default:\n                ()  // ok, all other transitions are valid.\n            }\n        }\n    }\n}\n\nextension SWIM.MemberStatusChangedEvent {\n    /// Reachability changes are important events, in which a reachable node became unreachable, or vice-versa,\n    /// as opposed to events which only move a member between `.alive` and `.suspect` status,\n    /// during which the member should still be considered and no actions assuming it's death shall be performed (yet).\n    ///\n    /// If true, a system may want to issue a reachability change event and handle this situation by confirming the node `.dead`,\n    /// and proceeding with its removal from the cluster.\n    public var isReachabilityChange: Bool {\n        guard let fromStatus = self.previousStatus else {\n            // i.e. nil -> anything, is always an effective reachability affecting change\n            return true\n        }\n\n        // explicitly list all changes which are affecting reachability, all others do not (i.e. 
flipping between\n        // alive and suspect does NOT affect high-level reachability).\n        switch (fromStatus, self.status) {\n        case (.alive, .unreachable),\n            (.alive, .dead):\n            return true\n        case (.suspect, .unreachable),\n            (.suspect, .dead):\n            return true\n        case (.unreachable, .alive),\n            (.unreachable, .suspect):\n            return true\n        default:\n            return false\n        }\n    }\n}\n\nextension SWIM.MemberStatusChangedEvent: CustomStringConvertible {\n    public var description: String {\n        var res = \"MemberStatusChangedEvent(\\(self.member), previousStatus: \"\n        if let previousStatus = self.previousStatus {\n            res += \"\\(previousStatus)\"\n        } else {\n            res += \"<unknown>\"\n        }\n        res += \")\"\n        return res\n    }\n}\n"
  },
  {
    "path": "Sources/SWIM/Member.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport ClusterMembership\n\n@preconcurrency import struct Dispatch.DispatchTime\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: SWIM Member\n\nextension SWIM {\n    /// A `SWIM.Member` represents an active participant of the cluster.\n    ///\n    /// It associates a specific `SWIMAddressablePeer` with its `SWIM.Status` and a number of other SWIM specific state information.\n    public struct Member<Peer: SWIMPeer>: Sendable {\n        /// Peer reference, used to send messages to this cluster member.\n        ///\n        /// Can represent the \"local\" member as well, use `swim.isMyself` to verify if a peer is `myself`.\n        public var peer: Peer\n\n        /// `Node` of the member's `peer`.\n        public var node: ClusterMembership.Node {\n            self.peer.node\n        }\n\n        /// Membership status of this cluster member\n        public var status: SWIM.Status\n\n        // Period in which protocol period was this state set\n        public var protocolPeriod: UInt64\n\n        /// Indicates a _local_ point in time when suspicion was started.\n        ///\n        /// - Note: Only suspect members may have this value set, but having the actual field in SWIM.Member feels more natural.\n        /// - Note: This value is never carried across processes, as it serves only locally triggering suspicion 
timeouts.\n        public let localSuspicionStartedAt: DispatchTime?  // could be \"status updated at\"?\n\n        /// Create a new member.\n        public init(peer: Peer, status: SWIM.Status, protocolPeriod: UInt64, suspicionStartedAt: DispatchTime? = nil) {\n            self.peer = peer\n            self.status = status\n            self.protocolPeriod = protocolPeriod\n            self.localSuspicionStartedAt = suspicionStartedAt\n        }\n\n        /// Convenience function for checking if a member is `SWIM.Status.alive`.\n        ///\n        /// - Returns: `true` if the member is alive\n        public var isAlive: Bool {\n            self.status.isAlive\n        }\n\n        /// Convenience function for checking if a member is `SWIM.Status.suspect`.\n        ///\n        /// - Returns: `true` if the member is suspect\n        public var isSuspect: Bool {\n            self.status.isSuspect\n        }\n\n        /// Convenience function for checking if a member is `SWIM.Status.unreachable`\n        ///\n        /// - Returns: `true` if the member is unreachable\n        public var isUnreachable: Bool {\n            self.status.isUnreachable\n        }\n\n        /// Convenience function for checking if a member is `SWIM.Status.dead`\n        ///\n        /// - Returns: `true` if the member is dead\n        public var isDead: Bool {\n            self.status.isDead\n        }\n    }\n}\n\n/// Manual Hashable conformance since we omit `suspicionStartedAt` from identity\nextension SWIM.Member: Hashable, Equatable {\n    public static func == (lhs: SWIM.Member<Peer>, rhs: SWIM.Member<Peer>) -> Bool {\n        lhs.peer.node == rhs.peer.node && lhs.protocolPeriod == rhs.protocolPeriod && lhs.status == rhs.status\n    }\n\n    public func hash(into hasher: inout Hasher) {\n        hasher.combine(self.peer.node)\n        hasher.combine(self.protocolPeriod)\n        hasher.combine(self.status)\n    }\n}\n\nextension SWIM.Member: CustomStringConvertible, 
CustomDebugStringConvertible {\n    public var description: String {\n        var res = \"SWIM.Member(\\(self.peer), \\(self.status), protocolPeriod: \\(self.protocolPeriod)\"\n        if let suspicionStartedAt = self.localSuspicionStartedAt {\n            res.append(\", suspicionStartedAt: \\(suspicionStartedAt)\")\n        }\n        res.append(\")\")\n        return res\n    }\n\n    public var debugDescription: String {\n        var res = \"SWIM.Member(\\(String(reflecting: self.peer)), \\(self.status), protocolPeriod: \\(self.protocolPeriod)\"\n        if let suspicionStartedAt = self.localSuspicionStartedAt {\n            res.append(\", suspicionStartedAt: \\(suspicionStartedAt)\")\n        }\n        res.append(\")\")\n        return res\n    }\n}\n"
  },
  {
    "path": "Sources/SWIM/Metrics.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport Metrics\n\nextension SWIM {\n    /// Object containing all metrics a SWIM instance and shell should be reporting.\n    ///\n    /// - SeeAlso: `SWIM.Metrics.Shell` for metrics that a specific implementation should emit\n    public struct Metrics {\n        // ==== --------------------------------------------------------------------------------------------------------\n        // MARK: Membership\n\n        /// Number of members (alive)\n        public let membersAlive: Gauge\n        /// Number of members (suspect)\n        public let membersSuspect: Gauge\n        /// Number of members (unreachable)\n        public let membersUnreachable: Gauge\n        // Number of members (dead) is not reported, because \"dead\" is considered \"removed\" from the cluster\n        // -- no metric --\n\n        /// Total number of nodes *ever* declared noticed as dead by this member\n        public let membersTotalDead: Counter\n\n        /// The current number of tombstones for previously known (and now dead and removed) members.\n        public let removedDeadMemberTombstones: Gauge\n\n        // ==== --------------------------------------------------------------------------------------------------------\n        // MARK: Internal metrics\n\n        /// Current value of the local health multiplier.\n        public let localHealthMultiplier: Gauge\n\n        // ==== 
--------------------------------------------------------------------------------------------------------\n        // MARK: Probe metrics\n\n        /// Records the incarnation of the SWIM instance.\n        ///\n        /// Incarnation numbers are bumped whenever the node needs to refute some gossip about itself,\n        /// as such the incarnation number *growth* is an interesting indicator of cluster observation churn.\n        public let incarnation: Gauge\n\n        /// Total number of successful probes (pings with successful replies)\n        public let successfulPingProbes: Counter\n        /// Total number of failed probes (pings without successful replies)\n        public let failedPingProbes: Counter\n\n        /// Total number of successful ping request probes (pingRequest with successful replies)\n        /// Either an .ack or .nack from the intermediary node counts as a success here\n        public let successfulPingRequestProbes: Counter\n        /// Total number of failed ping request probes (ping requests without successful replies)\n        /// Only a .timeout counts as a failed ping request.\n        public let failedPingRequestProbes: Counter\n\n        // ==== ----------------------------------------------------------------------------------------------------------------\n        // MARK: Shell / Transport Metrics\n\n        /// Metrics to be filled in by respective SWIM shell implementations.\n        public let shell: ShellMetrics\n\n        public struct ShellMetrics {\n            // ==== ----------------------------------------------------------------------------------------------------\n            // MARK: Probe metrics\n\n            /// Records time it takes for ping successful round-trips.\n            public let pingResponseTime: Timer\n\n            /// Records time it takes for (every) successful pingRequest round-trip\n            public let pingRequestResponseTimeAll: Timer\n            /// Records the time it takes for the (first) 
successful pingRequest to round trip\n            /// (A ping request hits multiple intermediary peers, the first reply is what counts)\n            public let pingRequestResponseTimeFirst: Timer\n\n            /// Number of incoming messages received\n            public let messageInboundCount: Counter\n            /// Sizes of messages received, in bytes\n            public let messageInboundBytes: Recorder\n\n            /// Number of messages sent\n            public let messageOutboundCount: Counter\n            /// Sizes of messages sent, in bytes\n            public let messageOutboundBytes: Recorder\n\n            public init(settings: SWIM.Settings) {\n                self.pingResponseTime = Timer(\n                    label: settings.metrics.makeLabel(\"roundTripTime\", \"ping\")\n                )\n\n                self.pingRequestResponseTimeAll = Timer(\n                    label: settings.metrics.makeLabel(\"roundTripTime\", \"pingRequest\"),\n                    dimensions: [(\"type\", \"all\")]\n                )\n                self.pingRequestResponseTimeFirst = Timer(\n                    label: settings.metrics.makeLabel(\"roundTripTime\", \"pingRequest\"),\n                    dimensions: [(\"type\", \"firstAck\")]\n                )\n\n                self.messageInboundCount = Counter(\n                    label: settings.metrics.makeLabel(\"message\", \"count\"),\n                    dimensions: [\n                        (\"direction\", \"in\")\n                    ]\n                )\n                self.messageInboundBytes = Recorder(\n                    label: settings.metrics.makeLabel(\"message\", \"bytes\"),\n                    dimensions: [\n                        (\"direction\", \"in\")\n                    ]\n                )\n\n                self.messageOutboundCount = Counter(\n                    label: settings.metrics.makeLabel(\"message\", \"count\"),\n                    dimensions: [\n                        
(\"direction\", \"out\")\n                    ]\n                )\n                self.messageOutboundBytes = Recorder(\n                    label: settings.metrics.makeLabel(\"message\", \"bytes\"),\n                    dimensions: [\n                        (\"direction\", \"out\")\n                    ]\n                )\n            }\n        }\n\n        public init(settings: SWIM.Settings) {\n            self.membersAlive = Gauge(\n                label: settings.metrics.makeLabel(\"members\"),\n                dimensions: [(\"status\", \"alive\")]\n            )\n            self.membersSuspect = Gauge(\n                label: settings.metrics.makeLabel(\"members\"),\n                dimensions: [(\"status\", \"suspect\")]\n            )\n            self.membersUnreachable = Gauge(\n                label: settings.metrics.makeLabel(\"members\"),\n                dimensions: [(\"status\", \"unreachable\")]\n            )\n            self.membersTotalDead = Counter(\n                label: settings.metrics.makeLabel(\"members\", \"total\"),\n                dimensions: [(\"status\", \"dead\")]\n            )\n            self.removedDeadMemberTombstones = Gauge(\n                label: settings.metrics.makeLabel(\"removedMemberTombstones\")\n            )\n\n            self.localHealthMultiplier = Gauge(\n                label: settings.metrics.makeLabel(\"lha\")\n            )\n\n            self.incarnation = Gauge(label: settings.metrics.makeLabel(\"incarnation\"))\n\n            self.successfulPingProbes = Counter(\n                label: settings.metrics.makeLabel(\"probe\", \"ping\"),\n                dimensions: [(\"type\", \"successful\")]\n            )\n            self.failedPingProbes = Counter(\n                label: settings.metrics.makeLabel(\"probe\", \"ping\"),\n                dimensions: [(\"type\", \"failed\")]\n            )\n\n            self.successfulPingRequestProbes = Counter(\n                label: 
settings.metrics.makeLabel(\"probe\", \"pingRequest\"),\n                dimensions: [(\"type\", \"successful\")]\n            )\n            self.failedPingRequestProbes = Counter(\n                label: settings.metrics.makeLabel(\"probe\", \"pingRequest\"),\n                dimensions: [(\"type\", \"failed\")]\n            )\n\n            self.shell = .init(settings: settings)\n        }\n    }\n}\n\nextension SWIM.Metrics {\n    /// Update member metrics based on SWIM's membership.\n    public func updateMembership(_ members: SWIM.Membership<some SWIMPeer>) {\n        var alives = 0\n        var suspects = 0\n        var unreachables = 0\n        for member in members {\n            switch member.status {\n            case .alive:\n                alives += 1\n            case .suspect:\n                suspects += 1\n            case .unreachable:\n                unreachables += 1\n            case .dead:\n                ()  // dead is reported as a removal when they're removed and tombstoned, not as a gauge\n            }\n        }\n        self.membersAlive.record(alives)\n        self.membersSuspect.record(suspects)\n        self.membersUnreachable.record(unreachables)\n    }\n}\n"
  },
  {
    "path": "Sources/SWIM/Peer.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020-2022 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport ClusterMembership\n\n/// Any peer in the cluster, can be used to identify a peer using its unique node that it represents.\npublic protocol SWIMAddressablePeer: Sendable {\n    /// Node that this peer is representing.\n    nonisolated var swimNode: ClusterMembership.Node { get }\n}\n\nextension SWIMAddressablePeer {\n    internal var node: ClusterMembership.Node {\n        self.swimNode\n    }\n}\n\n/// A SWIM peer which originated a `ping`, should be replied to with an `ack`.\npublic protocol SWIMPingOriginPeer: SWIMAddressablePeer {\n    associatedtype Peer: SWIMPeer\n\n    /// Acknowledge a `ping`.\n    ///\n    /// - parameters:\n    ///   - sequenceNumber: the sequence number of the incoming ping that this ack should acknowledge\n    ///   - target: target peer which received the ping (i.e. 
\"myself\" on the recipient of the `ping`).\n    ///   - incarnation: incarnation number of the target (myself),\n    ///     which is used to clarify which status is the most recent on the recipient of this acknowledgement.\n    ///   - payload: additional gossip data to be carried with the message.\n    ///     It is already trimmed to be no larger than configured in `SWIM.Settings`.\n    func ack(\n        acknowledging sequenceNumber: SWIM.SequenceNumber,\n        target: Peer,\n        incarnation: SWIM.Incarnation,\n        payload: SWIM.GossipPayload<Peer>\n    ) async throws\n}\n\n/// A SWIM peer which originated a `pingRequest` and thus can receive either an `ack` or `nack` from the intermediary.\npublic protocol SWIMPingRequestOriginPeer: SWIMPingOriginPeer {\n    associatedtype NackTarget: SWIMPeer\n\n    /// \"Negative acknowledge\" a ping.\n    ///\n    /// This message may ONLY be sent in an indirect-ping scenario from the \"middle\" peer.\n    /// Meaning, only a peer which received a `pingRequest` and wants to send the `pingRequestOrigin`\n    /// a nack in order for it to be aware that its message did reach this member, even if it never gets an `ack`\n    /// through this member, e.g. 
since the pings `target` node is actually not reachable anymore.\n    ///\n    /// - parameters:\n    ///   - sequenceNumber: the sequence number of the incoming `pingRequest` that this nack is a response to\n    ///   - target: the target peer which was attempted to be pinged but we didn't get an ack from it yet and are sending a nack back eagerly\n    func nack(\n        acknowledging sequenceNumber: SWIM.SequenceNumber,\n        target: NackTarget\n    ) async throws\n}\n\n/// SWIM peer which can be initiated contact with, by sending ping or ping request messages.\npublic protocol SWIMPeer: SWIMAddressablePeer {\n    associatedtype Peer: SWIMPeer\n    associatedtype PingOrigin: SWIMPingOriginPeer\n    associatedtype PingRequestOrigin: SWIMPingRequestOriginPeer\n\n    /// Perform a probe of this peer by sending a `ping` message.\n    ///\n    /// We expect the reply to be an `ack`.\n    ///\n    /// - parameters:\n    ///   - payload: additional gossip information to be processed by the recipient\n    ///   - origin: the origin peer that has initiated this ping message (i.e. 
\"myself\" of the sender)\n    ///     replies (`ack`s) to this ping should be sent to this peer\n    ///   - timeout: timeout during which we expect the other peer to have replied to us with a `PingResponse` about the pinged node.\n    ///     If we get no response about that peer in that time, this `ping` is considered failed, and the onResponse MUST be invoked with a `.timeout`.\n    ///   - sequenceNumber: sequence number of the ping message\n    ///\n    /// - Returns the corresponding reply (`ack`) or `timeout` event for this ping request occurs.\n    ///\n    /// - Throws if the ping fails or if the reply is `nack`.\n    func ping(\n        payload: SWIM.GossipPayload<Peer>,\n        from origin: PingOrigin,\n        timeout: Duration,\n        sequenceNumber: SWIM.SequenceNumber\n    ) async throws -> SWIM.PingResponse<Peer, PingRequestOrigin>\n\n    /// Send a ping request to this peer, asking it to perform an \"indirect ping\" of the target on our behalf.\n    ///\n    /// Any resulting acknowledgements are sent back to us. If no acknowledgements come back from the target, the intermediary\n    /// may send back nack messages, indicating that our connection to the intermediary is intact, however we didn't see\n    /// acknowledgements from the target itself.\n    ///\n    /// - parameters:\n    ///   - target: target peer that should be probed by the recipient on our behalf\n    ///   - payload: additional gossip information to be processed by the recipient\n    ///   - origin: the origin peer that has initiated this `pingRequest` (i.e. 
\"myself\" on the sender);\n    ///     replies (`ack`s) from this indirect ping should be forwarded to it.\n    ///   - timeout: timeout during which we expect the other peer to have replied to us with a `PingResponse` about the pinged node.\n    ///     If we get no response about that peer in that time, this `pingRequest` is considered failed, and the onResponse MUST be invoked with a `.timeout`.\n    ///   - sequenceNumber: sequence number of the pingRequest message\n    ///\n    /// - Returns the corresponding reply (`ack`, `nack`) or `timeout` event for this ping request occurs.\n    /// - Throws if the ping request fails\n    func pingRequest(\n        target: Peer,\n        payload: SWIM.GossipPayload<Peer>,\n        from origin: PingOrigin,\n        timeout: Duration,\n        sequenceNumber: SWIM.SequenceNumber\n    ) async throws -> SWIM.PingResponse<Peer, PingRequestOrigin>\n}\n"
  },
  {
    "path": "Sources/SWIM/SWIM.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport ClusterMembership\n\nimport struct Dispatch.DispatchTime\n\nextension SWIM {\n    /// Incarnation numbers serve as sequence number and used to determine which observation\n    /// is \"more recent\" when comparing gossiped information.\n    public typealias Incarnation = UInt64\n\n    /// A sequence number which can be used to associate with messages in order to establish an request/response\n    /// relationship between ping/pingRequest and their corresponding ack/nack messages.\n    public typealias SequenceNumber = UInt32\n\n    /// Typealias for the underlying membership representation.\n    public typealias Membership<Peer: SWIMPeer> = Dictionary<Node, SWIM.Member<Peer>>.Values\n}\n\nextension SWIM {\n    /// Message sent in reply to a `.ping`.\n    ///\n    /// The ack may be delivered directly in a request-response fashion between the probing and pinged members,\n    /// or indirectly, as a result of a `pingRequest` message.\n    public enum PingResponse<Peer: SWIMPeer, PingRequestOrigin: SWIMPingRequestOriginPeer>: Sendable {\n        /// - parameters:\n        ///   - target: the target of the ping;\n        ///     On the remote \"pinged\" node which is about to send an ack back to the ping origin this should be filled with the `myself` peer.\n        ///   - incarnation: the incarnation of the peer sent in the `target` field\n        ///   - payload: additional gossip data to be carried 
with the message.\n        ///   - sequenceNumber: the `sequenceNumber` of the `ping` message this ack is a \"reply\" for;\n        ///     It is used on the ping origin to co-relate the reply with its handling code.\n        case ack(\n            target: Peer,\n            incarnation: Incarnation,\n            payload: GossipPayload<Peer>,\n            sequenceNumber: SWIM.SequenceNumber\n        )\n\n        /// A `.nack` MAY ONLY be sent by an *intermediary* member which was received a `pingRequest` to perform a `ping` of some `target` member.\n        /// It SHOULD NOT be sent by a peer that received a `.ping` directly.\n        ///\n        /// The nack allows the origin of the ping request to know if the `k` peers it asked to perform the indirect probes,\n        /// are still responsive to it, or if perhaps that communication by itself is also breaking down. This information is\n        /// used to adjust the `localHealthMultiplier`, which impacts probe and timeout intervals.\n        ///\n        /// Note that nack information DOES NOT directly cause unreachability or suspicions, it only adjusts the timeouts\n        /// and intervals used by the swim instance in order to take into account the potential that our local node is\n        /// potentially not healthy.\n        ///\n        /// - parameters:\n        ///   - target: the target of the ping;\n        ///     On the remote \"pinged\" node which is about to send an ack back to the ping origin this should be filled with the `myself` peer.\n        ///   - target: the target of the ping;\n        ///     On the remote \"pinged\" node which is about to send an ack back to the ping origin this should be filled with the `myself` peer.\n        ///   - payload: The gossip payload to be carried in this message.\n        ///\n        /// - SeeAlso: Lifeguard IV.A. 
Local Health Aware Probe\n        case nack(target: Peer, sequenceNumber: SWIM.SequenceNumber)\n\n        /// This is a \"pseudo-message\", in the sense that it is not transported over the wire, but should be triggered\n        /// and fired into an implementation Shell when a ping has timed out.\n        ///\n        /// If a response for some reason produces a different error immediately rather than through a timeout,\n        /// the shell should also emit a `.timeout` response and feed it into the `SWIM.Instance` as it is important for\n        /// timeout adjustments that the instance makes. The instance does not need to know specifics about the reason of\n        /// a response not arriving, thus they are all handled via the same timeout response rather than extra \"error\" responses.\n        ///\n        /// - parameters:\n        ///   - target: the target of the ping;\n        ///     On the remote \"pinged\" node which is about to send an ack back to the ping origin this should be filled with the `myself` peer.\n        ///   - pingRequestOrigin: if this response/timeout is in response to a ping that was caused by a pingRequest,\n        ///     `pingRequestOrigin` must contain the original peer which originated the ping request.\n        ///   - timeout: the timeout interval value that caused this message to be triggered;\n        ///     In case of \"cancelled\" operations or similar semantics it is allowed to use a placeholder value here.\n        ///   - sequenceNumber: the `sequenceNumber` of the `ping` message this ack is a \"reply\" for;\n        ///     It is used on the ping origin to co-relate the reply with its handling code.\n        case timeout(\n            target: Peer,\n            pingRequestOrigin: PingRequestOrigin?,\n            timeout: Duration,\n            sequenceNumber: SWIM.SequenceNumber\n        )\n\n        /// Sequence number of the initial request this is a response to.\n        /// Used to pair up responses to the 
requests which initially caused them.\n        ///\n        /// All ping responses are guaranteed to have a sequence number attached to them.\n        public var sequenceNumber: SWIM.SequenceNumber {\n            switch self {\n            case .ack(_, _, _, let sequenceNumber):\n                return sequenceNumber\n            case .nack(_, let sequenceNumber):\n                return sequenceNumber\n            case .timeout(_, _, _, let sequenceNumber):\n                return sequenceNumber\n            }\n        }\n    }\n}\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: Gossip\n\nextension SWIM {\n    /// A piece of \"gossip\" about a specific member of the cluster.\n    ///\n    /// A gossip will only be spread a limited number of times, as configured by `settings.gossip.gossipedEnoughTimes(_:members:)`.\n    public struct Gossip<Peer: SWIMPeer>: Equatable {\n        /// The specific member (including status) that this gossip is about.\n        ///\n        /// A change in member status implies a new gossip must be created and the count for the rumor mongering must be reset.\n        public let member: SWIM.Member<Peer>\n        /// The number of times this specific gossip message was gossiped to another peer.\n        public internal(set) var numberOfTimesGossiped: Int\n    }\n\n    /// A `GossipPayload` is used to spread gossips about members.\n    public enum GossipPayload<Peer: SWIMPeer>: Sendable {\n        /// Explicit case to signal \"no gossip payload\"\n        ///\n        /// Effectively equivalent to an empty `.membership([])` case.\n        case none\n        /// Gossip information about a few select members.\n        case membership([SWIM.Member<Peer>])\n    }\n}\n\nextension SWIM.GossipPayload {\n    /// True if the underlying gossip is empty.\n    public var isNone: Bool {\n        switch self {\n        case .none:\n            return true\n        case 
.membership:\n            return false\n        }\n    }\n\n    /// True if the underlying gossip contains membership information.\n    public var isMembership: Bool {\n        switch self {\n        case .none:\n            return false\n        case .membership:\n            return true\n        }\n    }\n}\n"
  },
  {
    "path": "Sources/SWIM/SWIMInstance.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport ClusterMembership\nimport CoreMetrics\nimport Logging\n\nimport struct Dispatch.DispatchTime\n\n#if canImport(Darwin)\nimport Darwin\n#elseif canImport(Glibc)\nimport Glibc\n#elseif canImport(Musl)\nimport Musl\n#else\n#error(\"Unsupported platform\")\n#endif\n\nextension SWIM {\n    /// The `SWIM.Instance` encapsulates the complete algorithm implementation of the `SWIM` protocol.\n    ///\n    /// **Please refer to `SWIM` for an in-depth discussion of the algorithm and extensions implemented in this package.**\n    ///\n    /// - SeeAlso: `SWIM` for a complete and in depth discussion of the protocol.\n    public struct Instance<\n        Peer: SWIMPeer,\n        PingOrigin: SWIMPingOriginPeer,\n        PingRequestOrigin: SWIMPingRequestOriginPeer\n    >: SWIMProtocol {\n        /// The settings currently in use by this instance.\n        public let settings: SWIM.Settings\n\n        /// Struct containing all metrics a SWIM Instance (and implementation Shell) should emit.\n        public let metrics: SWIM.Metrics\n\n        /// Node which this SWIM.Instance is representing in the cluster.\n        public var swimNode: ClusterMembership.Node {\n            self.peer.node\n        }\n\n        // Convenience overload for internal use so we don't have to repeat \"swim\" all the time.\n        internal var node: ClusterMembership.Node {\n            self.swimNode\n        }\n\n        private var log: 
Logger {\n            self.settings.logger\n        }\n\n        /// The `SWIM.Member` representing this instance, also referred to as \"myself\".\n        public var member: SWIM.Member<Peer> {\n            if let storedMyself = self.member(forNode: self.swimNode),\n                !storedMyself.status.isAlive\n            {\n                return storedMyself  // it is something special, like .dead\n            } else {\n                // return the always up to date \"our view\" on ourselves\n                return SWIM.Member(\n                    peer: self.peer,\n                    status: .alive(incarnation: self.incarnation),\n                    protocolPeriod: self.protocolPeriod\n                )\n            }\n        }\n\n        // We store the owning SWIMShell peer in order avoid adding it to the `membersToPing` list\n        private let peer: Peer\n\n        /// Main members storage, map to values to obtain current members.\n        internal var _members: [ClusterMembership.Node: SWIM.Member<Peer>] {\n            didSet {\n                self.metrics.updateMembership(self.members)\n            }\n        }\n\n        /// List of members maintained in random yet stable order, see `addMember` for details.\n        internal var membersToPing: [SWIM.Member<Peer>]\n        /// Constantly mutated by `nextMemberToPing` in an effort to keep the order in which we ping nodes evenly distributed.\n        private var _membersToPingIndex: Int = 0\n        private var membersToPingIndex: Int {\n            self._membersToPingIndex\n        }\n\n        /// Tombstones are needed to avoid accidentally re-adding a member that we confirmed as dead already.\n        internal var removedDeadMemberTombstones: Set<MemberTombstone> = [] {\n            didSet {\n                self.metrics.removedDeadMemberTombstones.record(self.removedDeadMemberTombstones.count)\n            }\n        }\n\n        private var _sequenceNumber: SWIM.SequenceNumber = 0\n        /// 
Sequence numbers are used to identify messages and pair them up into request/replies.\n        /// - SeeAlso: `SWIM.SequenceNumber`\n        public mutating func nextSequenceNumber() -> SWIM.SequenceNumber {\n            // TODO: can we make it internal? it does not really hurt having public\n            // TODO: sequence numbers per-target node? https://github.com/apple/swift-cluster-membership/issues/39\n            self._sequenceNumber += 1\n            return self._sequenceNumber\n        }\n\n        /// Lifeguard IV.A. Local Health Multiplier (LHM)\n        /// > These different sources of feedback are combined in a Local Health Multiplier (LHM).\n        /// > LHM is a saturating counter, with a max value S and min value zero, meaning it will not\n        /// > increase above S or decrease below zero.\n        ///\n        /// The local health multiplier (LHM for short) is designed to relax the `probeInterval` and `pingTimeout`.\n        ///\n        /// The value MUST be >= 0.\n        ///\n        /// - SeeAlso: `SWIM.Instance.LHModifierEvent` for details how and when the LHM is adjusted.\n        public var localHealthMultiplier = 0 {\n            didSet {\n                assert(\n                    self.localHealthMultiplier >= 0,\n                    \"localHealthMultiplier MUST NOT be < 0, but was: \\(self.localHealthMultiplier)\"\n                )\n                self.metrics.localHealthMultiplier.record(self.localHealthMultiplier)\n            }\n        }\n\n        /// Dynamically adjusted probing interval.\n        ///\n        /// Usually this interval will be yielded with a directive at appropriate spots, so it should not be\n        /// necessary to invoke it manually.\n        ///\n        /// - SeeAlso: `localHealthMultiplier` for more detailed documentation.\n        /// - SeeAlso: Lifeguard IV.A. 
Local Health Multiplier (LHM)\n        var dynamicLHMProtocolInterval: Duration {\n            .nanoseconds(Int(self.settings.probeInterval.nanoseconds * Int64(1 + self.localHealthMultiplier)))\n        }\n\n        /// Dynamically adjusted (based on Local Health) timeout to be used when sending `ping` messages.\n        ///\n        /// Usually this interval will be yielded with a directive at appropriate spots, so it should not be\n        /// necessary to invoke it manually.\n        ///\n        /// - SeeAlso: `localHealthMultiplier` for more detailed documentation.\n        /// - SeeAlso: Lifeguard IV.A. Local Health Multiplier (LHM)\n        var dynamicLHMPingTimeout: Duration {\n            .nanoseconds(Int(self.settings.pingTimeout.nanoseconds * Int64(1 + self.localHealthMultiplier)))\n        }\n\n        /// The incarnation number is used to get a sense of ordering of events, so if an `.alive` or `.suspect`\n        /// state with a lower incarnation than the one currently known by a node is received, it can be dropped\n        /// as outdated and we don't accidentally override state with older events. 
The incarnation can only\n        /// be incremented by the respective node itself and will happen if that node receives a `.suspect` for\n        /// itself, to which it will respond with an `.alive` with the incremented incarnation.\n        var incarnation: SWIM.Incarnation {\n            self._incarnation\n        }\n\n        private var _incarnation: SWIM.Incarnation = 0 {\n            didSet {\n                self.metrics.incarnation.record(self._incarnation)\n            }\n        }\n\n        private mutating func nextIncarnation() {\n            self._incarnation += 1\n        }\n\n        /// Creates a new SWIM algorithm instance.\n        public init(settings: SWIM.Settings, myself: Peer) {\n            self.settings = settings\n            self.peer = myself\n            self._members = [:]\n            self.membersToPing = []\n            self.metrics = SWIM.Metrics(settings: settings)\n            _ = self.addMember(myself, status: .alive(incarnation: 0))\n\n            self.metrics.incarnation.record(self.incarnation)\n            self.metrics.localHealthMultiplier.record(self.localHealthMultiplier)\n            self.metrics.updateMembership(self.members)\n        }\n\n        func makeSuspicion(incarnation: SWIM.Incarnation) -> SWIM.Status {\n            .suspect(incarnation: incarnation, suspectedBy: [self.node])\n        }\n\n        func mergeSuspicions(\n            suspectedBy: Set<ClusterMembership.Node>,\n            previouslySuspectedBy: Set<ClusterMembership.Node>\n        ) -> Set<ClusterMembership.Node> {\n            var newSuspectedBy = previouslySuspectedBy\n            for suspectedBy in suspectedBy.sorted()\n            where newSuspectedBy.count < self.settings.lifeguard.maxIndependentSuspicions {\n                newSuspectedBy.update(with: suspectedBy)\n            }\n            return newSuspectedBy\n        }\n\n        /// Adjust the Local Health-aware Multiplier based on the event causing it.\n        ///\n        /// - 
Parameter event: event which causes the LHM adjustment.\n        public mutating func adjustLHMultiplier(_ event: LHModifierEvent) {\n            defer {\n                self.settings.logger.trace(\n                    \"Adjusted LHM multiplier\",\n                    metadata: [\n                        \"swim/lhm/event\": \"\\(event)\",\n                        \"swim/lhm\": \"\\(self.localHealthMultiplier)\",\n                    ]\n                )\n            }\n\n            self.localHealthMultiplier =\n                min(\n                    max(0, self.localHealthMultiplier + event.lhmAdjustment),\n                    self.settings.lifeguard.maxLocalHealthMultiplier\n                )\n        }\n\n        // The protocol period represents the number of times we have pinged a random member\n        // of the cluster. At the end of every ping cycle, the number will be incremented.\n        // Suspicion timeouts are based on the protocol period, i.e. if a probe did not\n        // reply within any of the `suspicionTimeoutPeriodsMax` rounds, it would be marked as `.suspect`.\n        private var _protocolPeriod: UInt64 = 0\n\n        /// In order to speed up the spreading of \"fresh\" rumors, we order gossips in their \"number of times gossiped\",\n        /// and thus are able to easily pick the least spread rumor and pick it for the next gossip round.\n        ///\n        /// This is tremendously important in order to spread information about e.g. newly added members to others,\n        /// before members which are aware of them could have a chance to all terminate, leaving the rest of the cluster\n        /// unaware about those new members. For disseminating suspicions this is less urgent, however also serves as an\n        /// useful optimization.\n        ///\n        /// - SeeAlso: SWIM 4.1. 
Infection-Style Dissemination Component\n        private var _messagesToGossip: Heap<SWIM.Gossip<Peer>> = Heap(\n            comparator: {\n                $0.numberOfTimesGossiped < $1.numberOfTimesGossiped\n            }\n        )\n\n        /// Note that peers without UID (in their `Node`) will NOT be added to the membership.\n        ///\n        /// This is because a cluster member must be a _specific_ peer instance, and not some arbitrary \"some peer on that host/port\",\n        /// which a Node without UID represents. The only reason we allow for peers and nodes without UID, is to simplify making\n        /// initial contact with a node - i.e. one can construct a peer to \"there should be a peer on this host/port\" to send an initial ping,\n        /// however in reply a peer in gossip must ALWAYS include it's unique identifier in the node - such that we know it from\n        /// any new instance of a process on the same host/port pair.\n        internal mutating func addMember(_ peer: Peer, status: SWIM.Status) -> [AddMemberDirective] {\n            var directives: [AddMemberDirective] = []\n\n            // Guard 1) protect against adding already known dead members\n            if self.hasTombstone(peer.node) {\n                // We saw this member already and even confirmed it dead, it shall never be added again\n                self.log.debug(\"Attempt to re-add already confirmed dead peer \\(peer), ignoring it.\")\n                directives.append(.memberAlreadyKnownDead(Member(peer: peer, status: .dead, protocolPeriod: 0)))\n                return directives\n            }\n\n            // Guard 2) protect against adding non UID members\n            guard peer.node.uid != nil else {\n                self.log.warning(\"Ignoring attempt to add peer representing node without UID: \\(peer)\")\n                return directives\n            }\n\n            let maybeExistingMember = self.member(for: peer)\n            if let existingMember = 
maybeExistingMember, existingMember.status.supersedes(status) {\n                // we already have a newer state for this member\n                directives.append(.newerMemberAlreadyPresent(existingMember))\n                return directives\n            }\n\n            /// if we're adding a node, it may be a reason to declare the previous \"incarnation\" as dead\n            // TODO: could solve by another dictionary without the UIDs?\n            if let withoutUIDMatchMember = self._members.first(where: {\n                $0.value.node.withoutUID == peer.node.withoutUID\n            })?.value,\n                peer.node.uid != nil,  // the incoming node has UID, so it definitely is a real peer\n                peer.node.uid != withoutUIDMatchMember.node.uid\n            {  // the peers don't agree on UID, it must be a new node on same host/port\n                switch self.confirmDead(peer: withoutUIDMatchMember.peer) {\n                case .ignored:\n                    ()  // should not happen?\n                case .applied(let change):\n                    directives.append(.previousHostPortMemberConfirmedDead(change))\n                }\n            }\n\n            // just in case we had a peer added manually, and thus we did not know its uuid, let us remove it\n            // maybe we replaced a mismatching UID node already, but let's check and remove also if we stored any \"without UID\" node\n            if let removed = self._members.removeValue(forKey: self.node.withoutUID) {\n                switch self.confirmDead(peer: removed.peer) {\n                case .ignored:\n                    ()  // should not happen?\n                case .applied(let change):\n                    directives.append(.previousHostPortMemberConfirmedDead(change))\n                }\n            }\n\n            let member = SWIM.Member(peer: peer, status: status, protocolPeriod: self.protocolPeriod)\n            self._members[member.node] = member\n\n            if 
self.notMyself(member), !member.isDead {\n                // We know this is a new member.\n                //\n                // Newly added members are inserted at a random spot in the list of members\n                // to ping, to have a better distribution of messages to this node from all\n                // other nodes. If for example all nodes would add it to the end of the list,\n                // it would take a longer time until it would be pinged for the first time\n                // and also likely receive multiple pings within a very short time frame.\n                let insertIndex = Int.random(in: self.membersToPing.startIndex...self.membersToPing.endIndex)\n                self.membersToPing.insert(member, at: insertIndex)\n                if insertIndex <= self.membersToPingIndex {\n                    // If we inserted the new member before the current `membersToPingIndex`,\n                    // we need to advance the index to avoid pinging the same member multiple\n                    // times in a row. This is especially critical when inserting a larger\n                    // number of members, e.g. 
when the cluster is just being formed, or\n                    // on a rolling restart.\n                    self.advanceMembersToPingIndex()\n                }\n            }\n\n            // upon each membership change we reset the gossip counters\n            // such that nodes have a chance to be notified about others,\n            // even if a node joined an otherwise quiescent cluster.\n            self.resetGossipPayloads(member: member)\n\n            directives.append(.added(member))\n\n            return directives\n        }\n\n        enum AddMemberDirective {\n            /// Informs an implementation that a new member was added and now has the following state.\n            /// An implementation should react to this by emitting a cluster membership change event.\n            case added(SWIM.Member<Peer>)\n            /// By adding a node with a new UID on the same host/port, we may actually invalidate any previous member that\n            /// existed on this host/port part. 
If this is the case, we confirm the \"previous\" member on the same host/port\n            /// pair as dead immediately.\n            case previousHostPortMemberConfirmedDead(SWIM.MemberStatusChangedEvent<Peer>)\n            /// We already have information about this exact `Member`, and our information is more recent (higher incarnation number).\n            /// The incoming information was discarded and the returned here member is the most up to date information we have.\n            case newerMemberAlreadyPresent(SWIM.Member<Peer>)\n            /// Member already was part of the cluster, became dead and we removed it.\n            /// It shall never be part of the cluster again.\n            ///\n            /// This is only enforced by tombstones which are kept in the system for a period of time,\n            /// in the hope that all other nodes stop gossiping about this known dead member until then as well.\n            case memberAlreadyKnownDead(SWIM.Member<Peer>)\n        }\n\n        /// Implements the round-robin yet shuffled member to probe selection as proposed in the SWIM paper.\n        ///\n        /// This mechanism should reduce the time until state is spread across the whole cluster,\n        /// by guaranteeing that each node will be gossiped to within N cycles (where N is the cluster size).\n        ///\n        /// - Note:\n        ///   SWIM 4.3: [...] The failure detection protocol at member works by maintaining a list (intuitively, an array) of the known\n        ///   elements of the current membership list, and select-ing ping targets not randomly from this list,\n        ///   but in a round-robin fashion. Instead, a newly joining member is inserted in the membership list at\n        ///   a position that is chosen uniformly at random. On completing a traversal of the entire list,\n        ///   rearranges the membership list to a random reordering.\n        mutating func nextPeerToPing() -> Peer? 
{\n            if self.membersToPing.isEmpty {\n                return nil\n            }\n\n            defer {\n                self.advanceMembersToPingIndex()\n            }\n            return self.membersToPing[self.membersToPingIndex].peer\n        }\n\n        /// Selects `settings.indirectProbeCount` members to send a `ping-req` to.\n        func membersToPingRequest(target: SWIMAddressablePeer) -> ArraySlice<SWIM.Member<Peer>> {\n            func notTarget(_ peer: SWIMAddressablePeer) -> Bool {\n                peer.node != target.node\n            }\n\n            func isReachable(_ status: SWIM.Status) -> Bool {\n                status.isAlive || status.isSuspect\n            }\n\n            let candidates = self._members\n                .values\n                .filter {\n                    notTarget($0.peer) && notMyself($0.peer) && isReachable($0.status)\n                }\n                .shuffled()\n\n            return candidates.prefix(self.settings.indirectProbeCount)\n        }\n\n        /// Mark a specific peer/member with the new status.\n        mutating func mark(_ peer: Peer, as status: SWIM.Status) -> MarkedDirective {\n            let previousStatusOption = self.status(of: peer)\n\n            var status = status\n            var protocolPeriod = self.protocolPeriod\n            var suspicionStartedAt: DispatchTime?\n\n            if case .suspect(let incomingIncarnation, let incomingSuspectedBy) = status,\n                case .suspect(let previousIncarnation, let previousSuspectedBy)? 
= previousStatusOption,\n                let member = self.member(for: peer),\n                incomingIncarnation == previousIncarnation\n            {\n                let suspicions = self.mergeSuspicions(\n                    suspectedBy: incomingSuspectedBy,\n                    previouslySuspectedBy: previousSuspectedBy\n                )\n                status = .suspect(incarnation: incomingIncarnation, suspectedBy: suspicions)\n                // we should keep old protocol period when member is already a suspect\n                protocolPeriod = member.protocolPeriod\n                suspicionStartedAt = member.localSuspicionStartedAt\n            } else if case .suspect = status {\n                suspicionStartedAt = self.now()\n            } else if case .unreachable = status,\n                case SWIM.Settings.UnreachabilitySettings.disabled = self.settings.unreachability\n            {\n                self.log.warning(\n                    \"Attempted to mark \\(peer.node) as `.unreachable`, but unreachability is disabled! 
Promoting to `.dead`!\"\n                )\n                status = .dead\n            }\n\n            if let previousStatus = previousStatusOption, previousStatus.supersedes(status) {\n                // we already have a newer status for this member\n                return .ignoredDueToOlderStatus(currentStatus: previousStatus)\n            }\n\n            let member = SWIM.Member(\n                peer: peer,\n                status: status,\n                protocolPeriod: protocolPeriod,\n                suspicionStartedAt: suspicionStartedAt\n            )\n            self._members[peer.node] = member\n\n            if status.isDead {\n                if let _ = self._members.removeValue(forKey: peer.node) {\n                    self.metrics.membersTotalDead.increment()\n                }\n                self.removeFromMembersToPing(member)\n                if let uid = member.node.uid {\n                    let deadline = self.protocolPeriod + self.settings.tombstoneTimeToLiveInTicks\n                    let tombstone = MemberTombstone(uid: uid, deadlineProtocolPeriod: deadline)\n                    self.removedDeadMemberTombstones.insert(tombstone)\n                }\n            }\n\n            self.resetGossipPayloads(member: member)\n\n            return .applied(previousStatus: previousStatusOption, member: member)\n        }\n\n        enum MarkedDirective: Equatable {\n            /// The status that was meant to be set is \"old\" and was ignored.\n            /// We already have newer information about this peer (`currentStatus`).\n            case ignoredDueToOlderStatus(currentStatus: SWIM.Status)\n            case applied(previousStatus: SWIM.Status?, member: SWIM.Member<Peer>)\n        }\n\n        private mutating func resetGossipPayloads(member: SWIM.Member<Peer>) {\n            // seems we gained a new member, and we need to reset gossip counts in order to ensure it also receive information about all nodes\n            // TODO: this 
would be a good place to trigger a full state sync, to speed up convergence; see https://github.com/apple/swift-cluster-membership/issues/37\n            self.members.forEach { self.addToGossip(member: $0) }\n        }\n\n        mutating func incrementProtocolPeriod() {\n            self._protocolPeriod += 1\n        }\n\n        mutating func advanceMembersToPingIndex() {\n            self._membersToPingIndex = (self._membersToPingIndex + 1) % self.membersToPing.count\n        }\n\n        mutating func removeFromMembersToPing(_ member: SWIM.Member<Peer>) {\n            if let index = self.membersToPing.firstIndex(where: { $0.peer.node == member.peer.node }) {\n                self.membersToPing.remove(at: index)\n                if index < self.membersToPingIndex {\n                    self._membersToPingIndex -= 1\n                }\n\n                if self.membersToPingIndex >= self.membersToPing.count {\n                    self._membersToPingIndex = self.membersToPing.startIndex\n                }\n            }\n        }\n\n        /// Current SWIM protocol period (i.e. which round of gossip the instance is in).\n        public var protocolPeriod: UInt64 {\n            self._protocolPeriod\n        }\n\n        /// Debug only. Actual suspicion timeout depends on number of suspicions and calculated in `suspicionTimeout`\n        /// This will only show current estimate of how many intervals should pass before suspicion is reached. May change when more data is coming\n        var timeoutSuspectsBeforePeriodMax: Int64 {\n            self.settings.lifeguard.suspicionTimeoutMax.nanoseconds / self.dynamicLHMProtocolInterval.nanoseconds + 1\n        }\n\n        /// Debug only. Actual suspicion timeout depends on number of suspicions and calculated in `suspicionTimeout`\n        /// This will only show current estimate of how many intervals should pass before suspicion is reached. 
May change when more data is coming\n        var timeoutSuspectsBeforePeriodMin: Int64 {\n            self.settings.lifeguard.suspicionTimeoutMin.nanoseconds / self.dynamicLHMProtocolInterval.nanoseconds + 1\n        }\n\n        /// Local Health Aware Suspicion timeout calculation, as defined Lifeguard IV.B.\n        ///\n        /// Suspicion timeout is logarithmically decaying from `suspicionTimeoutPeriodsMax` to `suspicionTimeoutPeriodsMin`\n        /// depending on a number of suspicion confirmations.\n        ///\n        /// Suspicion timeout adjusted according to number of known independent suspicions of given member.\n        ///\n        /// See: Lifeguard IV-B: Local Health Aware Suspicion\n        ///\n        /// The timeout for a given suspicion is calculated as follows:\n        ///\n        /// ```\n        ///                                             log(C + 1) 􏰁\n        /// SuspicionTimeout =􏰀 max(Min, Max − (Max−Min) ----------)\n        ///                                             log(K + 1)\n        /// ```\n        ///\n        /// where:\n        /// - `Min` and `Max` are the minimum and maximum Suspicion timeout.\n        ///   See Section `V-C` for discussion of their configuration.\n        /// - `K` is the number of independent suspicions required to be received before setting the suspicion timeout to `Min`.\n        ///   We default `K` to `3`.\n        /// - `C` is the number of independent suspicions about that member received since the local suspicion was raised.\n        public func suspicionTimeout(suspectedByCount: Int) -> Duration {\n            let minTimeout = self.settings.lifeguard.suspicionTimeoutMin.nanoseconds\n            let maxTimeout = self.settings.lifeguard.suspicionTimeoutMax.nanoseconds\n\n            return .nanoseconds(\n                Int(\n                    max(\n                        minTimeout,\n                        maxTimeout\n                            - Int64(\n                               
 round(\n                                    Double(maxTimeout - minTimeout)\n                                        * (log2(Double(suspectedByCount + 1))\n                                            / log2(Double(self.settings.lifeguard.maxIndependentSuspicions + 1)))\n                                )\n                            )\n                    )\n                )\n            )\n        }\n\n        /// Checks if a deadline is expired (relating to current time).\n        ///\n        /// - Parameter deadline: deadline we want to check if it's expired\n        /// - Returns: true if the `now()` time is \"past\" the deadline\n        public func isExpired(deadline: DispatchTime) -> Bool {\n            deadline < self.now()\n        }\n\n        /// Returns the current point in time on this machine.\n        /// - Note: `DispatchTime` is simply a number of nanoseconds since boot on this machine, and thus is not comparable across machines.\n        ///   We use it on purpose, as we do not intend to share our local time observations with any other peers.\n        private func now() -> DispatchTime {\n            self.settings.timeSourceNow()\n        }\n\n        /// Create a gossip payload (i.e. a set of `SWIM.Gossip` messages) that should be gossiped with failure detector\n        /// messages, or using some other medium.\n        ///\n        /// - Parameter target: Allows passing the target peer this gossip will be sent to.\n        ///     If gossiping to a specific peer, and given peer is suspect, we will always prioritize\n        ///     letting it know that it is being suspected, such that it can refute the suspicion as soon as possible,\n        ///     if if still is alive.\n        /// - Returns: The gossip payload to be gossiped.\n        public mutating func makeGossipPayload(to target: SWIMAddressablePeer?) -> SWIM.GossipPayload<Peer> {\n            var membersToGossipAbout: [SWIM.Member<Peer>] = []\n            // Lifeguard IV. 
Buddy System\n            // Always send to a suspect its suspicion.\n            // The reason for that to ensure the suspect will be notified it is being suspected,\n            // even if the suspicion has already been disseminated \"enough times\".\n            let targetIsSuspect: Bool\n            if let target = target,\n                let member = self.member(forNode: target.node),\n                member.isSuspect\n            {\n                // the member is suspect, and we must inform it about this, thus including in gossip payload:\n                membersToGossipAbout.append(member)\n                targetIsSuspect = true\n            } else {\n                targetIsSuspect = false\n            }\n\n            guard self._messagesToGossip.count > 0 else {\n                if membersToGossipAbout.isEmpty {\n                    // if we have no pending gossips to share, at least inform the member about our state.\n                    return .membership([self.member])\n                } else {\n                    return .membership(membersToGossipAbout)\n                }\n            }\n\n            // In order to avoid duplicates within a single gossip payload, we first collect all messages we need to\n            // gossip out and only then re-insert them into `messagesToGossip`. Otherwise, we may end up selecting the\n            // same message multiple times, if e.g. 
the total number of messages is smaller than the maximum gossip\n            // size, or for newer messages that have a lower `numberOfTimesGossiped` counter than the other messages.\n            var gossipRoundMessages: [SWIM.Gossip<Peer>] = []\n            gossipRoundMessages.reserveCapacity(\n                min(self.settings.gossip.maxNumberOfMessagesPerGossip, self._messagesToGossip.count)\n            )\n            while gossipRoundMessages.count < self.settings.gossip.maxNumberOfMessagesPerGossip,\n                let gossip = self._messagesToGossip.removeRoot()\n            {\n                gossipRoundMessages.append(gossip)\n            }\n\n            membersToGossipAbout.reserveCapacity(gossipRoundMessages.count)\n\n            for var gossip in gossipRoundMessages {\n                if targetIsSuspect, target?.node == gossip.member.node {\n                    // We do NOT add gossip to payload if it's a gossip about target and target is suspect,\n                    // this case was handled earlier and doing it here will lead to duplicate messages\n                    ()\n                } else {\n                    membersToGossipAbout.append(gossip.member)\n                }\n\n                gossip.numberOfTimesGossiped += 1\n                if self.settings.gossip.needsToBeGossipedMoreTimes(gossip, members: self.members.count) {\n                    self._messagesToGossip.append(gossip)\n                }\n            }\n\n            return .membership(membersToGossipAbout)\n        }\n\n        /// Adds `Member` to gossip messages.\n        internal mutating func addToGossip(member: SWIM.Member<Peer>) {\n            // we need to remove old state before we add the new gossip, so we don't gossip out stale state\n            self._messagesToGossip.remove(where: { $0.member.peer.node == member.peer.node })\n            self._messagesToGossip.append(.init(member: member, numberOfTimesGossiped: 0))\n        }\n    }\n}\n\n// ==== 
----------------------------------------------------------------------------------------------------------------\n// MARK: SWIM Member helper functions\n\nextension SWIM.Instance {\n    func notMyself(_ member: SWIM.Member<Peer>) -> Bool {\n        self.whenMyself(member) == nil\n    }\n\n    func notMyself(_ peer: SWIMAddressablePeer) -> Bool {\n        !self.isMyself(peer.node)\n    }\n\n    func isMyself(_ member: SWIM.Member<Peer>) -> Bool {\n        self.isMyself(member.node)\n    }\n\n    func whenMyself(_ member: SWIM.Member<Peer>) -> SWIM.Member<Peer>? {\n        if self.isMyself(member.peer) {\n            return member\n        } else {\n            return nil\n        }\n    }\n\n    func isMyself(_ peer: SWIMAddressablePeer) -> Bool {\n        self.isMyself(peer.node)\n    }\n\n    func isMyself(_ node: Node) -> Bool {\n        // we are exactly that node:\n        self.node == node\n            // ...or, the incoming node has no UID; there was no handshake made,\n            // and thus the other side does not know which specific node it is going to talk to; as such, \"we\" are that node\n            // as such, \"we\" are that node; we should never add such peer to our members, but we will reply to that node with \"us\" and thus\n            // inform it about our specific UID, and from then onwards it will know about specifically this node (by replacing its UID-less version with our UID-ful version).\n            || self.node.withoutUID == node\n    }\n\n    /// Returns status of the passed in peer's member of the cluster, if known.\n    ///\n    /// - Parameter peer: the peer to look up the status for.\n    /// - Returns: Status of the peer, if known.\n    public func status(of peer: SWIMAddressablePeer) -> SWIM.Status? 
{\n        if self.notMyself(peer) {\n            return self._members[peer.node]?.status\n        } else {\n            // we consider ourselves always as alive (enables refuting others suspecting us)\n            return .alive(incarnation: self.incarnation)\n        }\n    }\n\n    /// Checks if the passed in peer is already a known member of the swim cluster.\n    ///\n    /// Note: `.dead` members are eventually removed from the swim instance and as such peers are not remembered forever!\n    ///\n    /// - parameters:\n    ///   - peer: Peer to check if it currently is a member\n    ///   - ignoreUID: Whether or not to ignore the peers UID, e.g. this is useful when issuing a \"join 127.0.0.1:7337\"\n    ///                command, while being unaware of the nodes specific UID. When it joins, it joins with the specific UID after all.\n    /// - Returns: true if the peer is currently a member of the swim cluster (regardless of status it is in)\n    public func isMember(_ peer: SWIMAddressablePeer, ignoreUID: Bool = false) -> Bool {\n        // the peer could be either:\n        self.isMyself(peer)  // 1) \"us\" (i.e. the peer which hosts this SWIM instance, or\n            || self._members[peer.node] != nil  // 2) a \"known member\"\n            || (ignoreUID && peer.node.uid == nil\n                && self._members.contains {\n                    // 3) a known member, however the querying peer did not know the real UID of the peer yet\n                    $0.key.withoutUID == peer.node\n                })\n    }\n\n    /// Returns specific `SWIM.Member` instance for the passed in peer.\n    ///\n    /// - Parameter peer: peer whose member should be looked up (by its node identity, including the UID)\n    /// - Returns: the peer's member instance, if it currently is a member of this cluster\n    public func member(for peer: Peer) -> SWIM.Member<Peer>? 
{\n        self.member(forNode: peer.node)\n    }\n\n    /// Returns specific `SWIM.Member` instance for the passed in node.\n    ///\n    /// - Parameter node: node whose member should be looked up (matching also by node UID)\n    /// - Returns: the peer's member instance, if it currently is a member of this cluster\n    public func member(forNode node: ClusterMembership.Node) -> SWIM.Member<Peer>? {\n        self._members[node]\n    }\n\n    /// Count of only non-dead members.\n    ///\n    /// - SeeAlso: `SWIM.Status`\n    public var notDeadMemberCount: Int {\n        self._members.lazy.filter {\n            !$0.value.isDead\n        }.count\n    }\n\n    /// Count of all \"other\" members known to this instance (meaning members other than `myself`).\n    ///\n    /// This is equal to `n-1` where `n` is the number of nodes in the cluster.\n    public var otherMemberCount: Int {\n        self.allMemberCount - 1\n    }\n\n    /// Count of all members, including the myself node as well as any unreachable and dead nodes which are still kept in the membership.\n    public var allMemberCount: Int {\n        self._members.count\n    }\n\n    /// Lists all members known to this SWIM instance currently, potentially including even `.dead` nodes.\n    ///\n    /// - Complexity: O(1)\n    /// - Returns: Returns all current members of the cluster, including suspect, unreachable and potentially dead members.\n    public var members: SWIM.Membership<Peer> {\n        self._members.values\n    }\n\n    /// Lists all `SWIM.Status.suspect` members.\n    ///\n    /// The `myself` member will never be suspect, as we always assume ourselves to be alive,\n    /// even if all other cluster members think otherwise - this is what allows us to refute\n    /// suspicions about our unreachability after all.\n    ///\n    /// - SeeAlso: `SWIM.Status.suspect`\n    internal var suspects: [SWIM.Member<Peer>] {\n        self.members.filter { $0.isSuspect }\n    }\n}\n\n// ==== 
----------------------------------------------------------------------------------------------------------------\n// MARK: Handling SWIM protocol interactions\n\nextension SWIM.Instance {\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: On Periodic Ping Tick Handler\n\n    public mutating func onPeriodicPingTick() -> [PeriodicPingTickDirective] {\n        defer {\n            self.incrementProtocolPeriod()\n        }\n\n        var directives: [PeriodicPingTickDirective] = []\n\n        // 1) always check suspicion timeouts, even if we no longer have anyone else to ping\n        directives.append(contentsOf: self.checkSuspicionTimeouts())\n\n        // 2) if we have someone to ping, let's do so\n        if let toPing = self.nextPeerToPing() {\n            directives.append(\n                .sendPing(\n                    target: toPing,\n                    payload: self.makeGossipPayload(to: toPing),\n                    timeout: self.dynamicLHMPingTimeout,\n                    sequenceNumber: self.nextSequenceNumber()\n                )\n            )\n        }\n\n        // 3) periodic cleanup of tombstones\n        // TODO: could be optimized a bit to keep the \"oldest one\" and know if we have to scan already or not yet\" etc\n        if self.protocolPeriod % UInt64(self.settings.tombstoneCleanupIntervalInTicks) == 0 {\n            cleanupTombstones()\n        }\n\n        // 3) ALWAYS schedule the next tick\n        directives.append(.scheduleNextTick(delay: self.dynamicLHMProtocolInterval))\n\n        return directives\n    }\n\n    /// Describes how a periodic tick should be handled.\n    public enum PeriodicPingTickDirective {\n        /// The membership has changed, e.g. 
a member was declared unreachable or dead and an event may need to be emitted.\n        case membershipChanged(SWIM.MemberStatusChangedEvent<Peer>)\n        /// Send a ping to the requested `target` peer using the provided timeout and sequenceNumber.\n        case sendPing(\n            target: Peer,\n            payload: SWIM.GossipPayload<Peer>,\n            timeout: Duration,\n            sequenceNumber: SWIM.SequenceNumber\n        )\n        /// Schedule the next timer `onPeriodicPingTick` invocation in `delay` time.\n        case scheduleNextTick(delay: Duration)\n    }\n\n    /// Check all suspects if any of them have been suspect for long enough that we should promote them to unreachable or dead.\n    ///\n    /// Suspicion timeouts are calculated taking into account the number of peers suspecting a given member (LHA-Suspicion).\n    private mutating func checkSuspicionTimeouts() -> [PeriodicPingTickDirective] {\n        var directives: [PeriodicPingTickDirective] = []\n\n        for suspect in self.suspects {\n            if case .suspect(_, let suspectedBy) = suspect.status {\n                let suspicionTimeout = self.suspicionTimeout(suspectedByCount: suspectedBy.count)\n                // proceed with suspicion escalation to .unreachable if the timeout period has been exceeded\n                // We don't use Deadline because tests can override TimeSource\n                guard let suspectSince = suspect.localSuspicionStartedAt,\n                    self.isExpired(\n                        deadline: DispatchTime(\n                            uptimeNanoseconds: suspectSince.uptimeNanoseconds + UInt64(suspicionTimeout.nanoseconds)\n                        )\n                    )\n                else {\n                    continue  // skip, this suspect is not timed-out yet\n                }\n\n                guard let incarnation = suspect.status.incarnation else {\n                    // suspect had no incarnation number? 
that means it is .dead already and should be recycled soon\n                    continue\n                }\n\n                let newStatus: SWIM.Status\n                if self.settings.unreachability == .enabled {\n                    newStatus = .unreachable(incarnation: incarnation)\n                } else {\n                    newStatus = .dead\n                }\n\n                switch self.mark(suspect.peer, as: newStatus) {\n                case .applied(let previousStatus, let member):\n                    directives.append(\n                        .membershipChanged(\n                            SWIM.MemberStatusChangedEvent(previousStatus: previousStatus, member: member)\n                        )\n                    )\n                case .ignoredDueToOlderStatus:\n                    continue\n                }\n            }\n        }\n\n        self.metrics.updateMembership(self.members)\n        return directives\n    }\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: On Ping Handler\n\n    public mutating func onPing(\n        pingOrigin: PingOrigin,\n        payload: SWIM.GossipPayload<Peer>,\n        sequenceNumber: SWIM.SequenceNumber\n    ) -> [PingDirective] {\n        var directives: [PingDirective]\n\n        // 1) Process gossip\n        directives = self.onGossipPayload(payload).map { g in\n            .gossipProcessed(g)\n        }\n\n        // 2) Prepare reply\n        directives.append(\n            .sendAck(\n                to: pingOrigin,\n                pingedTarget: self.peer,\n                incarnation: self.incarnation,\n                payload: self.makeGossipPayload(to: pingOrigin),\n                acknowledging: sequenceNumber\n            )\n        )\n\n        return directives\n    }\n\n    /// Directs a shell implementation about how to handle an incoming `.ping`.\n    public enum PingDirective {\n        /// Indicates 
that incoming gossip was processed and the membership may have changed because of it,\n        /// inspect the `GossipProcessedDirective` to learn more about what change was applied.\n        case gossipProcessed(GossipProcessedDirective)\n\n        /// Send an `ack` message.\n        ///\n        /// - parameters:\n        ///   - to: the peer to which an `ack` should be sent\n        ///   - pingedTarget: the `myself` peer, should be passed as `target` when sending the ack message\n        ///   - incarnation: the incarnation number of this peer; used to determine which status is \"the latest\"\n        ///     when comparing acknowledgement with suspicions\n        ///   - payload: additional gossip payload to include in the ack message\n        ///   - acknowledging: sequence number of the ack message\n        case sendAck(\n            to: PingOrigin,\n            pingedTarget: Peer,\n            incarnation: SWIM.Incarnation,\n            payload: SWIM.GossipPayload<Peer>,\n            acknowledging: SWIM.SequenceNumber\n        )\n    }\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: On Ping Response Handlers\n\n    public mutating func onPingResponse(\n        response: SWIM.PingResponse<Peer, PingRequestOrigin>,\n        pingRequestOrigin: PingRequestOrigin?,\n        pingRequestSequenceNumber: SWIM.SequenceNumber?\n    ) -> [PingResponseDirective] {\n        switch response {\n        case .ack(let target, let incarnation, let payload, let sequenceNumber):\n            return self.onPingAckResponse(\n                target: target,\n                incarnation: incarnation,\n                payload: payload,\n                pingRequestOrigin: pingRequestOrigin,\n                pingRequestSequenceNumber: pingRequestSequenceNumber,\n                sequenceNumber: sequenceNumber\n            )\n        case .nack(let target, let sequenceNumber):\n            return 
self.onPingNackResponse(\n                target: target,\n                pingRequestOrigin: pingRequestOrigin,\n                sequenceNumber: sequenceNumber\n            )\n        case .timeout(let target, let pingRequestOrigin, let timeout, _):\n            return self.onPingResponseTimeout(\n                target: target,\n                timeout: timeout,\n                pingRequestOrigin: pingRequestOrigin,\n                pingRequestSequenceNumber: pingRequestSequenceNumber\n            )\n        }\n    }\n\n    mutating func onPingAckResponse(\n        target pingedNode: Peer,\n        incarnation: SWIM.Incarnation,\n        payload: SWIM.GossipPayload<Peer>,\n        pingRequestOrigin: PingRequestOrigin?,\n        pingRequestSequenceNumber: SWIM.SequenceNumber?,\n        sequenceNumber: SWIM.SequenceNumber\n    ) -> [PingResponseDirective] {\n        self.metrics.successfulPingProbes.increment()\n\n        var directives: [PingResponseDirective] = []\n        // We're proxying an ack payload from ping target back to ping source.\n        // If ping target was a suspect, there'll be a refutation in a payload\n        // and we probably want to process it asap. 
And since the data is already here,\n        // processing this payload will just make gossip convergence faster.\n        let gossipDirectives = self.onGossipPayload(payload)\n        directives.append(\n            contentsOf: gossipDirectives.map {\n                PingResponseDirective.gossipProcessed($0)\n            }\n        )\n\n        self.log.debug(\n            \"Received ack from [\\(pingedNode)] with incarnation [\\(incarnation)] and payload [\\(payload)]\",\n            metadata: self.metadata\n        )\n        // The shell is already informed tha the member moved -> alive by the gossipProcessed directive\n        _ = self.mark(pingedNode, as: .alive(incarnation: incarnation))\n\n        if let pingRequestOrigin = pingRequestOrigin,\n            let pingRequestSequenceNumber = pingRequestSequenceNumber\n        {\n            directives.append(\n                .sendAck(\n                    peer: pingRequestOrigin,\n                    acknowledging: pingRequestSequenceNumber,\n                    target: pingedNode,\n                    incarnation: incarnation,\n                    payload: payload\n                )\n            )\n        } else {\n            self.adjustLHMultiplier(.successfulProbe)\n        }\n\n        return directives\n    }\n\n    mutating func onPingNackResponse(\n        target pingedNode: Peer,\n        pingRequestOrigin: PingRequestOrigin?,\n        sequenceNumber: SWIM.SequenceNumber\n    ) -> [PingResponseDirective] {\n        // yes, a nack is \"successful\" -- we did get a reply from the peer we contacted after all\n        self.metrics.successfulPingProbes.increment()\n\n        // Important:\n        // We do _nothing_ here, however we actually handle nacks implicitly in today's SWIMNIO implementation...\n        // This works because the arrival of the nack means we removed the callback from the handler,\n        // so the timeout also is cancelled and thus no +1 will happen since the timeout will not 
trigger as well\n        //\n        // we should solve this more nicely, so any implementation benefits from this;\n        // FIXME: .nack handling discussion https://github.com/apple/swift-cluster-membership/issues/52\n        return []\n    }\n\n    mutating func onPingResponseTimeout(\n        target: Peer,\n        timeout: Duration,\n        pingRequestOrigin: PingRequestOrigin?,\n        pingRequestSequenceNumber: SWIM.SequenceNumber?\n    ) -> [PingResponseDirective] {\n        self.metrics.failedPingProbes.increment()\n\n        var directives: [PingResponseDirective] = []\n        if let pingRequestOrigin = pingRequestOrigin,\n            let pingRequestSequenceNumber = pingRequestSequenceNumber\n        {\n            // Meaning we were doing a ping on behalf of the pingReq origin, we got a timeout, and thus need to report a nack back.\n            directives.append(\n                .sendNack(\n                    peer: pingRequestOrigin,\n                    acknowledging: pingRequestSequenceNumber,\n                    target: target\n                )\n            )\n            // Note that we do NOT adjust the LHM multiplier, this is on purpose.\n            // We do not adjust it if we are only an intermediary.\n        } else {\n            // We sent a direct `.ping` and it timed out; we now suspect the target node and must issue additional ping requests.\n            guard let pingedMember = self.member(for: target) else {\n                return directives  // seems we are not aware of this node, ignore it\n            }\n            guard let pingedMemberLastKnownIncarnation = pingedMember.status.incarnation else {\n                return directives  // so it is already dead, not need to suspect it\n            }\n\n            // The member should become suspect, it missed out ping/ack cycle:\n            // we do not inform the shell about -> suspect moves; only unreachable or dead moves are of interest to it.\n            _ = 
self.mark(pingedMember.peer, as: self.makeSuspicion(incarnation: pingedMemberLastKnownIncarnation))\n\n            // adjust the LHM accordingly, we failed a probe (ping/ack) cycle\n            self.adjustLHMultiplier(.failedProbe)\n\n            // if we have other peers, we should ping request through them,\n            // if not then there's no-one to ping request through and we just continue.\n            if let pingRequestDirective = self.preparePingRequests(target: pingedMember.peer) {\n                directives.append(.sendPingRequests(pingRequestDirective))\n            }\n        }\n\n        return directives\n    }\n\n    /// Prepare ping request directives such that the shell can easily fire those messages\n    mutating func preparePingRequests(target: Peer) -> SendPingRequestDirective? {\n        guard let lastKnownStatus = self.status(of: target) else {\n            // FIXME allow logging\n            // context.log.info(\"Skipping ping requests after failed ping to [\\(toPing)] because node has been removed from member list\")\n            return nil\n        }\n\n        // select random members to send ping requests to\n        let membersToPingRequest = self.membersToPingRequest(target: target)\n\n        guard !membersToPingRequest.isEmpty else {\n            // no nodes available to ping, so we have to assume the node suspect right away\n            guard let lastKnownIncarnation = lastKnownStatus.incarnation else {\n                // TODO logging\n                // log.debug(\"Not marking .suspect, as [\\(target)] is already dead.\") // \"You are already dead!\"\n                return nil\n            }\n\n            switch self.mark(target, as: self.makeSuspicion(incarnation: lastKnownIncarnation)) {\n            case .applied:\n                // TODO: logging\n                // log.debug(\"No members to ping-req through, marked [\\(target)] immediately as [\\(currentStatus)].\")\n                return nil\n            case 
.ignoredDueToOlderStatus:\n                // TODO: logging\n                // log.debug(\"No members to ping-req through to [\\(target)], was already [\\(currentStatus)].\")\n                return nil\n            }\n        }\n\n        let details = membersToPingRequest.map { member in\n            SendPingRequestDirective.PingRequestDetail(\n                peerToPingRequestThrough: member.peer,\n                payload: self.makeGossipPayload(to: target),\n                sequenceNumber: self.nextSequenceNumber()\n            )\n        }\n\n        return SendPingRequestDirective(target: target, timeout: self.dynamicLHMPingTimeout, requestDetails: details)\n    }\n\n    /// Directs a shell implementation about how to handle an incoming `.pingRequest`.\n    public enum PingResponseDirective {\n        /// Indicates that incoming gossip was processed and the membership may have changed because of it,\n        /// inspect the `GossipProcessedDirective` to learn more about what change was applied.\n        case gossipProcessed(GossipProcessedDirective)\n\n        /// Upon receiving an `ack` from `target`, if we were making this ping because of a `pingRequest` from `peer`,\n        /// we need to forward that acknowledgement to that peer now.\n        ///\n        /// - parameters:\n        ///   - to: the peer to which an `ack` should be sent\n        ///   - pingedTarget: the `myself` peer, should be passed as `target` when sending the ack message\n        ///   - incarnation: the incarnation number of this peer; used to determine which status is \"the latest\"\n        ///     when comparing acknowledgement with suspicions\n        ///   - payload: additional gossip payload to include in the ack message\n        ///   - acknowledging: sequence number of the ack message\n        case sendAck(\n            peer: PingRequestOrigin,\n            acknowledging: SWIM.SequenceNumber,\n            target: Peer,\n            incarnation: UInt64,\n            payload: 
SWIM.GossipPayload<Peer>\n        )\n\n        /// Send a `nack` to the `peer` which originally send this peer request.\n        ///\n        /// - parameters:\n        ///   - peer: the peer to which the `nack` should be sent\n        ///   - acknowledging: sequence number of the ack message\n        ///   - target: the peer which we attempted to ping but it didn't reply on time\n        case sendNack(peer: PingRequestOrigin, acknowledging: SWIM.SequenceNumber, target: Peer)\n\n        /// Send a `pingRequest` as described by the `SendPingRequestDirective`.\n        ///\n        /// The target node did not reply with an successful `.ack` and as such was now marked as `.suspect`.\n        /// By sending ping requests to other members of the cluster we attempt to revert this suspicion,\n        /// perhaps some other node is able to receive an `.ack` from it after all?\n        case sendPingRequests(SendPingRequestDirective)\n    }\n\n    /// Describes how a pingRequest should be performed.\n    ///\n    /// Only a single `target` peer is used, however it may be pinged \"through\" a few other members.\n    /// The amount of fan-out in pingRequests is configurable by `swim.indirectProbeCount`.\n    public struct SendPingRequestDirective {\n        /// Target that the should be probed by the `requestDetails.memberToPingRequestThrough` peers.\n        public let target: Peer\n        /// Timeout to be used for all the ping requests about to be sent.\n        public let timeout: Duration\n        /// Describes the details how each ping request should be performed.\n        public let requestDetails: [PingRequestDetail]\n\n        /// Describes a specific ping request to be made.\n        public struct PingRequestDetail {\n            /// Marks the peer the `pingRequest` should be sent to.\n            public let peerToPingRequestThrough: Peer\n            /// Additional gossip to carry with the `pingRequest`\n            public let payload: SWIM.GossipPayload<Peer>\n    
        /// Sequence number to assign to this `pingRequest`.\n            public let sequenceNumber: SWIM.SequenceNumber\n        }\n    }\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: On Ping Request\n\n    public mutating func onPingRequest(\n        target: Peer,\n        pingRequestOrigin: PingRequestOrigin,\n        payload: SWIM.GossipPayload<Peer>,\n        sequenceNumber: SWIM.SequenceNumber\n    ) -> [PingRequestDirective] {\n        var directives: [PingRequestDirective] = []\n\n        // 1) Process gossip\n        let gossipDirectives: [PingRequestDirective] = self.onGossipPayload(payload).map { directive in\n            .gossipProcessed(directive)\n        }\n        directives.append(contentsOf: gossipDirectives)\n\n        // 2) Process the ping request itself\n        guard self.notMyself(target) else {\n            self.log.debug(\n                \"Received pingRequest to ping myself myself, ignoring.\",\n                metadata: self.metadata([\n                    \"swim/pingRequestOrigin\": \"\\(pingRequestOrigin)\",\n                    \"swim/pingSequenceNumber\": \"\\(sequenceNumber)\",\n                ])\n            )\n            return directives\n        }\n\n        if !self.isMember(target) {\n            // The case when member is a suspect is already handled in `processGossipPayload`,\n            // since payload will always contain suspicion about target member; no need to inform the shell again about this\n            _ = self.addMember(target, status: .alive(incarnation: 0))\n        }\n\n        let pingSequenceNumber = self.nextSequenceNumber()\n        // Indirect ping timeout should always be shorter than pingRequest timeout.\n        // Setting it to a fraction of initial ping timeout as suggested in the original paper.\n        // - SeeAlso: Local Health Multiplier (LHM)\n        let indirectPingTimeout = Duration.nanoseconds(\n   
         Int(Double(self.settings.pingTimeout.nanoseconds) * self.settings.lifeguard.indirectPingTimeoutMultiplier)\n        )\n\n        directives.append(\n            .sendPing(\n                target: target,\n                payload: self.makeGossipPayload(to: target),\n                pingRequestOrigin: pingRequestOrigin,\n                pingRequestSequenceNumber: sequenceNumber,\n                timeout: indirectPingTimeout,\n                pingSequenceNumber: pingSequenceNumber\n            )\n        )\n\n        return directives\n    }\n\n    /// Directs a shell implementation about how to handle an incoming `.pingRequest`.\n    public enum PingRequestDirective {\n        /// Indicates that incoming gossip was processed and the membership may have changed because of it,\n        /// inspect the `GossipProcessedDirective` to learn more about what change was applied.\n        case gossipProcessed(GossipProcessedDirective)\n        /// Send a ping to the requested `target` peer using the provided timeout and sequenceNumber.\n        ///\n        /// - parameters:\n        ///   - target: the target peer which should be probed\n        ///   - payload: gossip information to be processed by this peer,\n        ///     resulting in potentially discovering new information about other members of the cluster\n        ///   - pingRequestOrigin: peer on whose behalf we are performing this indirect ping;\n        ///     it will be useful to pipe back replies from the target to the origin member.\n        ///   - pingRequestSequenceNumber: sequence number that must be used when replying to the `pingRequestOrigin`\n        ///   - timeout: timeout to be used when performing the ping probe (it MAY be smaller than a normal direct ping probe's timeout)\n        ///   - pingSequenceNumber: sequence number to use for the `ping` message\n        case sendPing(\n            target: Peer,\n            payload: SWIM.GossipPayload<Peer>,\n            pingRequestOrigin: 
PingRequestOrigin,\n            pingRequestSequenceNumber: SWIM.SequenceNumber,\n            timeout: Duration,\n            pingSequenceNumber: SWIM.SequenceNumber\n        )\n    }\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: On Ping Request Response\n\n    /// This should be called on first successful (non-nack) pingRequestResponse\n    public mutating func onPingRequestResponse(\n        _ response: SWIM.PingResponse<Peer, PingRequestOrigin>,\n        pinged pingedPeer: Peer\n    ) -> [PingRequestResponseDirective] {\n        guard let previousStatus = self.status(of: pingedPeer) else {\n            // we do not process replies from an unknown member; it likely means we have removed it already for some reason.\n            return [.unknownMember]\n        }\n        var directives: [PingRequestResponseDirective] = []\n\n        switch response {\n        case .ack(let target, let incarnation, let payload, _):\n            assert(\n                target.node == pingedPeer.node,\n                \"The ack.from member [\\(target)] MUST be equal to the pinged member \\(pingedPeer.node)]; The Ack message is being forwarded back to us from the pinged member.\"\n            )\n\n            let gossipDirectives = self.onGossipPayload(payload)\n            directives += gossipDirectives.map {\n                PingRequestResponseDirective.gossipProcessed($0)\n            }\n\n            switch self.mark(pingedPeer, as: .alive(incarnation: incarnation)) {\n            case .applied:\n                directives.append(.alive(previousStatus: previousStatus))\n                return directives\n            case .ignoredDueToOlderStatus(let currentStatus):\n                directives.append(.ignoredDueToOlderStatus(currentStatus: currentStatus))\n                return directives\n            }\n        case .nack:\n            // TODO: this should never happen. 
How do we express it?\n            directives.append(.nackReceived)\n            return directives\n\n        case .timeout:\n            switch previousStatus {\n            case .alive(let incarnation),\n                .suspect(let incarnation, _):\n                switch self.mark(pingedPeer, as: self.makeSuspicion(incarnation: incarnation)) {\n                case .applied:\n                    directives.append(\n                        .newlySuspect(previousStatus: previousStatus, suspect: self.member(forNode: pingedPeer.node)!)\n                    )\n                    return directives\n                case .ignoredDueToOlderStatus(let status):\n                    directives.append(.ignoredDueToOlderStatus(currentStatus: status))\n                    return directives\n                }\n            case .unreachable:\n                directives.append(.alreadyUnreachable)\n                return directives\n            case .dead:\n                directives.append(.alreadyDead)\n                return directives\n            }\n        }\n    }\n\n    public mutating func onEveryPingRequestResponse(\n        _ result: SWIM.PingResponse<Peer, PingRequestOrigin>,\n        pinged peer: Peer\n    ) -> [PingRequestResponseDirective] {\n        switch result {\n        case .timeout:\n            // Failed pingRequestResponse indicates a missed nack, we should adjust LHMultiplier\n            self.metrics.failedPingRequestProbes.increment()\n            self.adjustLHMultiplier(.probeWithMissedNack)\n        case .ack, .nack:\n            // Successful pingRequestResponse should be handled only once (and thus in `onPingRequestResponse` only),\n            // however we can nicely handle all responses here for purposes of metrics (and NOT adjust them in the onPingRequestResponse\n            // since that would lead to double-counting successes)\n            self.metrics.successfulPingRequestProbes.increment()\n        }\n\n        return []  // just so 
happens that we never actually perform any actions here (so far, keeping the return type for future compatibility)\n    }\n\n    /// Directs a shell implementation about how to handle an incoming ping request response.\n    public enum PingRequestResponseDirective {\n        /// Indicates that incoming gossip was processed and the membership may have changed because of it,\n        /// inspect the `GossipProcessedDirective` to learn more about what change was applied.\n        case gossipProcessed(GossipProcessedDirective)\n\n        case alive(previousStatus: SWIM.Status)  // TODO: offer a membership change option rather?\n        case nackReceived\n        /// Indicates that the `target` of the ping response is not known to this peer anymore,\n        /// it could be that we already marked it as dead and removed it.\n        ///\n        /// No additional action, except optionally some debug logging should be performed.\n        case unknownMember\n        case newlySuspect(previousStatus: SWIM.Status, suspect: SWIM.Member<Peer>)\n        case alreadySuspect\n        case alreadyUnreachable\n        case alreadyDead\n        /// The incoming gossip is older than already known information about the target peer (by incarnation), and was (safely) ignored.\n        /// The current status of the peer is as returned in `currentStatus`.\n        case ignoredDueToOlderStatus(currentStatus: SWIM.Status)\n    }\n\n    internal mutating func onGossipPayload(_ payload: SWIM.GossipPayload<Peer>) -> [GossipProcessedDirective] {\n        switch payload {\n        case .none:\n            return []\n        case .membership(let members):\n            return members.flatMap { member in\n                self.onGossipPayload(about: member)\n            }\n        }\n    }\n\n    internal mutating func onGossipPayload(about member: SWIM.Member<Peer>) -> [GossipProcessedDirective] {\n        if self.isMyself(member) {\n            return [self.onMyselfGossipPayload(myself: member)]\n 
       } else {\n            return self.onOtherMemberGossipPayload(member: member)\n        }\n    }\n\n    /// ### Unreachability status handling\n    /// Performs all special handling of `.unreachable` such that if it is disabled members are automatically promoted to `.dead`.\n    /// See `settings.unreachability` for more details.\n    private mutating func onMyselfGossipPayload(myself incoming: SWIM.Member<Peer>) -> GossipProcessedDirective {\n        assert(\n            self.peer.node == incoming.peer.node,\n            \"\"\"\n            Attempted to process gossip as-if about myself, but was not the same peer, was: \\(incoming.peer.node.detailedDescription). \\\n            Myself: \\(self.peer)\n            SWIM.Instance: \\(self)\n            \"\"\"\n        )\n\n        // Note, we don't yield changes for myself node observations, thus the self node will never be reported as unreachable,\n        // after all, we can always reach ourselves. We may reconsider this if we wanted to allow SWIM to inform us about\n        // the fact that many other nodes think we're unreachable, and thus we could perform self-downing based upon this information\n\n        switch incoming.status {\n        case .alive:\n            // as long as other nodes see us as alive, we're happy\n            return .applied(change: nil)\n        case .suspect(let suspectedInIncarnation, _):\n            // someone suspected us, so we need to increment our incarnation number to spread our alive status with\n            // the incremented incarnation\n            if suspectedInIncarnation == self.incarnation {\n                self.adjustLHMultiplier(.refutingSuspectMessageAboutSelf)\n                self.nextIncarnation()\n                // refute the suspicion, we clearly are still alive\n                self.addToGossip(member: self.member)\n                return .applied(change: nil)\n            } else if suspectedInIncarnation > self.incarnation {\n                
self.log.warning(\n                    \"\"\"\n                    Received gossip about self with incarnation number [\\(suspectedInIncarnation)] > current incarnation [\\(self._incarnation)], \\\n                    which should never happen and while harmless is highly suspicious, please raise an issue with logs. This MAY be an issue in the library.\n                    \"\"\"\n                )\n                return .applied(change: nil)\n            } else {\n                // incoming incarnation was < than current one, i.e. the incoming information is \"old\" thus we discard it\n                return .applied(change: nil)\n            }\n\n        case .unreachable(let unreachableInIncarnation):\n            switch self.settings.unreachability {\n            case .enabled:\n                // someone suspected us,\n                // so we need to increment our incarnation number to spread our alive status with the incremented incarnation\n                if unreachableInIncarnation == self.incarnation {\n                    self.nextIncarnation()\n                    return .ignored\n                } else if unreachableInIncarnation > self.incarnation {\n                    self.log.warning(\n                        \"\"\"\n                        Received gossip about self with incarnation number [\\(unreachableInIncarnation)] > current incarnation [\\(self._incarnation)], \\\n                        which should never happen and while harmless is highly suspicious, please raise an issue with logs. 
This MAY be an issue in the library.\n                        \"\"\"\n                    )\n                    return .applied(change: nil)\n                } else {\n                    self.log.debug(\n                        \"Incoming .unreachable about myself, however current incarnation [\\(self.incarnation)] is greater than incoming \\(incoming.status)\"\n                    )\n                    return .ignored\n                }\n\n            case .disabled:\n                // we don't use unreachable states, and in any case, would not apply it to myself\n                // as we always consider \"us\" to be reachable after all\n                return .ignored\n            }\n\n        case .dead:\n            guard var myselfMember = self.member(for: self.peer) else {\n                return .applied(change: nil)\n            }\n\n            myselfMember.status = .dead\n            switch self.mark(self.peer, as: .dead) {\n            case .applied(.some(let previousStatus), _):\n                return .applied(change: .init(previousStatus: previousStatus, member: myselfMember))\n            default:\n                self.log.warning(\"\\(self.peer) already marked .dead\", metadata: self.metadata)\n                return .ignored\n            }\n        }\n    }\n\n    /// ### Unreachability status handling\n    /// Performs all special handling of `.unreachable` such that if it is disabled members are automatically promoted to `.dead`.\n    /// See `settings.unreachability` for more details.\n    private mutating func onOtherMemberGossipPayload(member: SWIM.Member<Peer>) -> [GossipProcessedDirective] {\n        assert(\n            self.node != member.node,\n            \"Attempted to process gossip as-if not-myself, but WAS same peer, was: \\(member). 
Myself: \\(self.peer, orElse: \"nil\")\"\n        )\n\n        guard self.isMember(member.peer) else {\n            // it's a new node it seems\n\n            guard member.node.uid != nil else {\n                self.log.debug(\n                    \"Incoming member has no `uid`, ignoring; cannot add members to membership without uid\",\n                    metadata: self.metadata([\n                        \"member\": \"\\(member)\",\n                        \"member/node\": \"\\(member.node.detailedDescription)\",\n                    ])\n                )\n                return []\n            }\n\n            // the Shell may need to set up a connection if we just made a move from previousStatus: nil,\n            // so we definitely need to emit this change\n            return self.addMember(member.peer, status: member.status).compactMap { directive in\n                switch directive {\n                case .added(let member):\n                    return .applied(change: SWIM.MemberStatusChangedEvent(previousStatus: nil, member: member))\n                case .previousHostPortMemberConfirmedDead(let change):\n                    return .applied(change: change)\n                case .memberAlreadyKnownDead:\n                    return nil\n                case .newerMemberAlreadyPresent(let member):\n                    return .applied(change: SWIM.MemberStatusChangedEvent(previousStatus: nil, member: member))\n                }\n            }\n        }\n\n        var directives: [GossipProcessedDirective] = []\n        switch self.mark(member.peer, as: member.status) {\n        case .applied(let previousStatus, let member):\n            if member.status.isSuspect, previousStatus?.isAlive ?? 
false {\n                self.log.debug(\n                    \"Member [\\(member.peer.node, orElse: \"<unknown-node>\")] marked as suspect, via incoming gossip\",\n                    metadata: self.metadata\n                )\n            }\n            directives.append(.applied(change: .init(previousStatus: previousStatus, member: member)))\n\n        case .ignoredDueToOlderStatus(let currentStatus):\n            self.log.trace(\n                \"Gossip about member \\(member.node), incoming: [\\(member.status)] does not supersede current: [\\(currentStatus)]\",\n                metadata: self.metadata\n            )\n        }\n\n        return directives\n    }\n\n    /// Indicates the gossip payload was processed and changes to the membership were made.\n    public enum GossipProcessedDirective: Equatable {\n        /// The gossip was applied to the local membership view and an event may want to be emitted for it.\n        ///\n        /// It is up to the shell implementation which events are published, but generally it is recommended to\n        /// only publish changes which are `SWIM.MemberStatusChangedEvent.isReachabilityChange` as those can and should\n        /// usually be acted on by high level implementations.\n        ///\n        /// Changes between alive and suspect are an internal implementation detail of SWIM,\n        /// and usually do not need to be emitted as events to users.\n        ///\n        /// ### Note for connection based implementations\n        /// You may need to establish a new connection if the changes' `previousStatus` is `nil`, as it means we have\n        /// not seen this member before and in order to send messages to it, one may want to eagerly establish a connection to it.\n        case applied(change: SWIM.MemberStatusChangedEvent<Peer>?)\n\n        static var ignored: Self {\n            .applied(change: nil)\n        }\n    }\n\n    // ==== 
------------------------------------------------------------------------------------------------------------\n    // MARK: Confirm Dead\n\n    public mutating func confirmDead(peer: Peer) -> ConfirmDeadDirective {\n        if self.member(for: peer) == nil,\n            self._members.first(where: { $0.key == peer.node }) == nil\n        {\n            return .ignored  // this peer is absolutely unknown to us, we should not even emit events about it\n        }\n\n        switch self.mark(peer, as: .dead) {\n        case .applied(let previousStatus, let member):\n            return .applied(change: SWIM.MemberStatusChangedEvent(previousStatus: previousStatus, member: member))\n\n        case .ignoredDueToOlderStatus:\n            return .ignored  // it was already dead for example\n        }\n    }\n\n    /// Directs how to handle the result of a `confirmDead` call.\n    public enum ConfirmDeadDirective {\n        /// The change was applied and caused a membership change.\n        ///\n        /// The change should be emitted as an event by an interpreting shell.\n        case applied(change: SWIM.MemberStatusChangedEvent<Peer>)\n\n        /// The confirmation had not effect, either the peer was not known, or is already dead.\n        case ignored\n    }\n\n    /// Returns if this node is known to have already been marked dead at some point.\n    func hasTombstone(_ node: Node) -> Bool {\n        guard let uid = node.uid else {\n            return false\n        }\n\n        let anythingAsNotTakenIntoAccountInEquality: UInt64 = 0\n        return self.removedDeadMemberTombstones.contains(\n            .init(uid: uid, deadlineProtocolPeriod: anythingAsNotTakenIntoAccountInEquality)\n        )\n    }\n\n    private mutating func cleanupTombstones() {  // time to cleanup the tombstones\n        self.removedDeadMemberTombstones = self.removedDeadMemberTombstones.filter {\n            // keep the ones where their deadline is still in the future\n            
self.protocolPeriod < $0.deadlineProtocolPeriod\n        }\n    }\n\n    /// Used to store known \"confirmed dead\" member unique identifiers.\n    struct MemberTombstone: Hashable {\n        /// UID of the dead member\n        let uid: UInt64\n        /// After how many protocol periods (\"ticks\") should this tombstone be cleaned up\n        let deadlineProtocolPeriod: UInt64\n\n        func hash(into hasher: inout Hasher) {\n            hasher.combine(self.uid)\n        }\n\n        static func == (lhs: MemberTombstone, rhs: MemberTombstone) -> Bool {\n            lhs.uid == rhs.uid\n        }\n    }\n}\n\nextension SWIM.Instance: CustomDebugStringConvertible {\n    public var debugDescription: String {\n        // multi-line on purpose\n        \"\"\"\n        SWIM.Instance(\n            settings: \\(settings),\n            \n            myself: \\(String(reflecting: peer)),\n                                \n            _incarnation: \\(_incarnation),\n            _protocolPeriod: \\(_protocolPeriod), \n\n            members: [\n                \\(_members.map { \"\\($0.key)\" }.joined(separator: \"\\n        \"))\n            ] \n            membersToPing: [ \n                \\(membersToPing.map { \"\\($0)\" }.joined(separator: \"\\n        \"))\n            ]\n             \n            _messagesToGossip: \\(_messagesToGossip)\n        )\n        \"\"\"\n    }\n}\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: SWIM Lifeguard Local Health Modifier event\n\nextension SWIM.Instance {\n    /// Events which cause the modification of the Local health aware Multiplier to be adjusted.\n    ///\n    /// The LHM is increased (in increments of `1`) whenever an event occurs that indicates that the instance\n    /// is not processing incoming messages in timely order.\n    ///\n    /// It is decreased and decreased (by `1`), whenever it processes a successful ping/ack cycle,\n    /// 
meaning that it is healthy and properly processing incoming messages on time.\n    ///\n    /// - SeeAlso: Lifeguard IV.A. Local Health Aware Probe, which describes the rationale behind the events.\n    public enum LHModifierEvent: Equatable {\n        /// A successful ping/ack probe cycle was completed.\n        case successfulProbe\n        /// A direct ping/ack cycle has failed (timed-out).\n        case failedProbe\n        /// Some other member has suspected this member, and we had to refute the suspicion.\n        case refutingSuspectMessageAboutSelf\n        /// During a `pingRequest` the ping request origin (us) received a timeout without seeing `.nack`\n        /// from the intermediary member; This could mean we are having network trouble and are a faulty node.\n        case probeWithMissedNack\n\n        /// - Returns: by how much the LHM should be adjusted in response to this event.\n        ///   The adjusted value MUST be clamped between `0 <= value <= maxLocalHealthMultiplier`\n        var lhmAdjustment: Int {\n            switch self {\n            case .successfulProbe:\n                return -1  // decrease the LHM\n            case .failedProbe,\n                .refutingSuspectMessageAboutSelf,\n                .probeWithMissedNack:\n                return 1  // increase the LHM\n            }\n        }\n    }\n}\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: SWIM Logging Metadata\n\nextension SWIM.Instance {\n    /// Allows for convenient adding of additional metadata to the `SWIM.Instance.metadata`.\n    public func metadata(_ additional: Logger.Metadata) -> Logger.Metadata {\n        var metadata = self.metadata\n        metadata.merge(additional, uniquingKeysWith: { _, r in r })\n        return metadata\n    }\n\n    /// While the SWIM.Instance is not meant to be logging by itself, it does offer metadata for loggers to use.\n    public var metadata: 
Logger.Metadata {\n        [\n            \"swim/protocolPeriod\": \"\\(self.protocolPeriod)\",\n            \"swim/timeoutSuspectsBeforePeriodMax\": \"\\(self.timeoutSuspectsBeforePeriodMax)\",\n            \"swim/timeoutSuspectsBeforePeriodMin\": \"\\(self.timeoutSuspectsBeforePeriodMin)\",\n            \"swim/incarnation\": \"\\(self.incarnation)\",\n            \"swim/members/all\": Logger.Metadata.Value.array(self.members.map { \"\\(reflecting: $0)\" }),\n            \"swim/members/count\": \"\\(self.notDeadMemberCount)\",\n            \"swim/suspects/count\": \"\\(self.suspects.count)\",\n        ]\n    }\n}\n"
  },
  {
    "path": "Sources/SWIM/SWIMProtocol.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport ClusterMembership\nimport Logging\n\nimport struct Dispatch.DispatchTime\n\n#if canImport(Darwin)\nimport Darwin\n#elseif canImport(Glibc)\nimport Glibc\n#elseif canImport(Musl)\nimport Musl\n#else\n#error(\"Unsupported platform\")\n#endif\n\n/// ## Scalable Weakly-consistent Infection-style Process Group Membership Protocol\n///\n/// > As you swim lazily through the milieu, <br/>\n/// > The secrets of the world will infect you.\n///\n/// Implementation of the SWIM protocol in abstract terms, not dependent on any specific runtime.\n/// The actual implementation resides in `SWIM.Instance`.\n///\n/// ### Terminology\n/// This implementation follows the original terminology mostly directly, with the notable exception of the original\n/// wording of \"confirm\" being rather represented as `SWIM.Status.dead`, as we found the \"confirm\" wording to be\n/// confusing in practice.\n///\n/// ### Extensions & Modifications\n///\n/// This implementation has a few notable extensions and modifications implemented, some documented already in the initial\n/// SWIM paper, some in the Lifeguard extensions paper and some being simple adjustments we found practical in our environments.\n///\n/// - The \"random peer selection\" is not completely ad-hoc random, but follows a _stable order_, randomized on peer insertion.\n///   - Unlike the completely random selection in the original paper. 
This has the benefit of consistently going \"around\"\n///     all peers participating in the cluster, enabling a more efficient spread of membership information among peers,\n///     by allowing us to avoid continuously (yet randomly) selecting the same few peers.\n///   - This optimization is described in the original SWIM paper, and followed by some implementations.\n///\n/// - Introduction of an `.unreachable` status, that is ordered after `.suspect` and before `.dead`.\n///   - This is because the decision to move an unreachable peer to .dead status is a large and important decision,\n///     in which user code may want to participate, e.g. by attempting \"shoot the other peer in the head\" or other patterns,\n///     before triggering the `.dead` status (which usually implies a complete removal of information of that peer existence from the cluster),\n///     after which no further communication with given peer will ever be possible anymore.\n///   - The `.unreachable` status is optional and _disabled_ by default.\n///   - Other SWIM implementations handle this problem by _storing_ dead members for a period of time after declaring them dead,\n///     also deviating from the original paper; so we conclude that this use case is quite common and allow addressing it in various ways.\n///\n/// - Preservation of `.unreachable` information\n///   - The original paper does not keep in memory information about dead peers,\n///     it only gossips the information that a member is now dead, but does not keep tombstones for later reference.\n///\n/// Implementations of extensions documented in the Lifeguard paper (linked below):\n///\n/// - Local Health Aware Probe - which replaces the static timeouts in probing with a dynamic one, taking into account\n///   recent communication failures of our member with others.\n/// - Local Health Aware Suspicion - which improves the way `.suspect` states and their timeouts are handled,\n///   effectively relying on more information 
about unreachability. See: `suspicionTimeout`.\n/// - Buddy System - enables members to directly and immediately notify suspect peers about them being suspected,\n///   such that they have more time and a chance to refute these suspicions more quickly, rather than relying on completely\n///   random gossip for that suspicion information to reach such suspect peer.\n///\n/// SWIM serves as a low-level distributed failure detector mechanism.\n/// It also maintains its own membership in order to monitor and select peers to ping with periodic health checks,\n/// however this membership is not directly the same as the high-level membership exposed by the `Cluster`.\n///\n/// ### SWIM Membership\n/// SWIM provides a weakly consistent view on the process group membership.\n/// Membership in this context means that we have some knowledge about the node, that was acquired by either\n/// communicating with the peer directly, for example when initially connecting to the cluster,\n/// or because some other peer shared information about it with us.\n/// To avoid moving a peer \"back\" into alive or suspect state because of older statuses that get replicated,\n/// we need to be able to put them into temporal order. 
For this reason each peer has an incarnation number assigned to it.\n///\n/// This number is monotonically increasing and can only be incremented by the respective peer itself and only if it is\n/// suspected by another peer in its current incarnation.\n///\n/// The ordering of statuses is as follows:\n///\n///     alive(N) < suspect(N) < alive(N+1) < suspect(N+1) < dead\n///\n/// A member that has been declared dead can *never* return from that status and has to be restarted to join the cluster.\n/// Note that such \"restarted node\" from SWIM's perspective is simply a new node which happens to occupy the same host/port,\n/// as nodes are identified by their unique identifiers (`ClusterMembership.Node.uid`).\n///\n/// The information about dead nodes will be kept for a configurable amount of time, after which it will be removed to\n/// prevent the state on each node from growing too big. The timeout value should be chosen to be big enough to prevent\n/// faulty nodes from re-joining the cluster and is usually in the order of a few days.\n///\n/// ### SWIM Gossip\n///\n/// SWIM uses an infection style gossip mechanism to replicate state across the cluster.\n/// The gossip payload contains information about other node’s observed status, and will be disseminated throughout the\n/// cluster by piggybacking onto periodic health check messages, i.e. whenever a node is sending a ping, a ping request,\n/// or is responding with an acknowledgement, it will include the latest gossip with that message as well. When a node\n/// receives gossip, it has to apply the statuses to its local state according to the ordering stated above. If a node\n/// receives gossip about itself, it has to react accordingly.\n///\n/// If it is suspected by another peer in its current incarnation, it has to increment its incarnation in response.\n/// If it has been marked as dead, it SHOULD shut itself down (i.e. 
terminate the entire node / service), to avoid \"zombie\"\n/// nodes staying around even though they are already ejected from the cluster.\n///\n/// ### SWIM Protocol Logic Implementation\n///\n/// See `SWIM.Instance` for a detailed discussion on the implementation.\n///\n/// ### Further Reading\n///\n/// - [SWIM: Scalable Weakly-consistent Infection-style Process Group Membership Protocol](https://www.cs.cornell.edu/projects/Quicksilver/public_pdfs/SWIM.pdf)\n/// - [Lifeguard: Local Health Awareness for More Accurate Failure Detection](https://arxiv.org/abs/1707.00788)\npublic enum SWIM {}\n\n/// This protocol defines all callbacks that a SWIM Shell (in other words, \"runtime\") must implement to properly drive\n/// the underlying SWIM.Instance (which contains the complete logic of SWIM).\npublic protocol SWIMProtocol {\n    associatedtype Peer: SWIMPeer\n    associatedtype PingOrigin: SWIMPingOriginPeer\n    associatedtype PingRequestOrigin: SWIMPingRequestOriginPeer\n\n    typealias Instance = SWIM.Instance<Peer, PingOrigin, PingRequestOrigin>\n\n    /// MUST be invoked periodically, in intervals of `self.swim.dynamicLHMProtocolInterval`.\n    ///\n    /// MUST NOT be scheduled using a \"repeated\" task/timer, as the interval is dynamic and may change as the algorithm proceeds.\n    /// Implementations should schedule each next tick by handling the returned directive's `scheduleNextTick` case,\n    /// which includes the appropriate delay to use for the next protocol tick.\n    ///\n    /// This is the heart of the protocol, as each tick corresponds to a \"protocol period\" in which:\n    /// - suspect members are checked if they're overdue and should become `.unreachable` or `.dead`,\n    /// - decisions are made to `.ping` a random peer for fault detection,\n    /// - and some internal house keeping is performed.\n    ///\n    /// Note: This means that effectively all decisions are made in interval of protocol periods.\n    /// It would be possible to have a 
secondary periodic or more ad-hoc interval to speed up\n    /// some operations, however this is currently not implemented and the protocol follows the fairly\n    /// standard mode of simply carrying payloads in periodic ping messages.\n    ///\n    /// - Returns: `SWIM.Instance.PeriodicPingTickDirective` which must be interpreted by a shell implementation\n    mutating func onPeriodicPingTick() -> [Instance.PeriodicPingTickDirective]\n\n    /// MUST be invoked whenever a `ping` message is received.\n    ///\n    /// A specific shell implementation must act on the returned directives.\n    /// The order of interpreting the events should be as returned by the onPing invocation.\n    ///\n    /// - parameters:\n    ///   - pingOrigin: the origin peer that issued this `ping`, it should be replied to (as instructed in the returned ping directive)\n    ///   - payload: gossip information to be processed by this peer, resulting in potentially discovering new information about other members of the cluster\n    ///   - sequenceNumber: sequence number of this ping, will be used to reply to the ping's origin using the same sequence number\n    /// - Returns: `Instance.PingDirective` which must be interpreted by a shell implementation\n    mutating func onPing(\n        pingOrigin: PingOrigin,\n        payload: SWIM.GossipPayload<Peer>,\n        sequenceNumber: SWIM.SequenceNumber\n    ) -> [Instance.PingDirective]\n\n    /// MUST be invoked when a `pingRequest` is received.\n    ///\n    /// The returned directives will instruct an implementation to perform probes of available peers on behalf of\n    ///\n    /// - parameters:\n    ///   - target: target peer which this instance was asked to indirectly ping.\n    ///   - pingRequestOrigin: the origin of this ping request; it should be notified with an .ack once we get a reply from the probed peer\n    ///   - payload: gossip information to be processed by this peer, resulting in potentially discovering new information about 
other members of the cluster\n    ///   - sequenceNumber: the sequenceNumber of the incoming `pingRequest`, used to reply with the appropriate sequence number once we get an `ack` from the target\n    /// - Returns: `Instance.PingRequestDirective` which must be interpreted by a shell implementation\n    mutating func onPingRequest(\n        target: Peer,\n        pingRequestOrigin: PingRequestOrigin,\n        payload: SWIM.GossipPayload<Peer>,\n        sequenceNumber: SWIM.SequenceNumber\n    ) -> [Instance.PingRequestDirective]\n\n    /// MUST be invoked when a ping response (or timeout) occurs for a specific ping.\n    ///\n    /// - parameters:\n    ///   - response: the response (or timeout) related to this ping\n    ///   - pingRequestOrigin: if this ping was issued on behalf of a `pingRequestOrigin`, that peer, otherwise `nil`\n    ///   - pingRequestSequenceNumber: if this ping was issued on behalf of a `pingRequestOrigin`, then the sequence number of that `pingRequest`, otherwise `nil`\n    /// - Returns: `Instance.PingResponseDirective` which must be interpreted by a shell implementation\n    mutating func onPingResponse(\n        response: SWIM.PingResponse<Peer, PingRequestOrigin>,\n        pingRequestOrigin: PingRequestOrigin?,\n        pingRequestSequenceNumber: SWIM.SequenceNumber?\n    ) -> [Instance.PingResponseDirective]\n\n    /// MUST be invoked exactly in one of the two following situations:\n    /// - the *first successful response* from any number of `ping` messages that this peer has performed on behalf of a `pingRequestOrigin`,\n    /// - just one single time with a `timeout` if *none* of the pings successfully returned an `ack`.\n    ///\n    /// - parameters:\n    ///   - response: the response representing this ping's result (i.e. 
`ack` or `timeout`).\n    ///   - pinged: the pinged peer that this response is from\n    /// - Returns: `Instance.PingRequestResponseDirective` which must be interpreted by a shell implementation\n    mutating func onPingRequestResponse(\n        _ response: SWIM.PingResponse<Peer, PingRequestOrigin>,\n        pinged: Peer\n    ) -> [Instance.PingRequestResponseDirective]\n\n    /// MUST be invoked whenever a response to a `pingRequest` (an ack, nack or lack response i.e. a timeout) happens.\n    ///\n    /// This function is adjusting Local Health and MUST be invoked on **every** received response to a pingRequest,\n    /// in order for the local health adjusted timeouts to be calculated correctly.\n    ///\n    /// - parameters:\n    ///   - response: the response representing\n    ///   - pinged: the pinged peer that this response is from\n    /// - Returns: `Instance.PingRequestResponseDirective` which must be interpreted by a shell implementation\n    mutating func onEveryPingRequestResponse(\n        _ response: SWIM.PingResponse<Peer, PingRequestOrigin>,\n        pinged: Peer\n    ) -> [Instance.PingRequestResponseDirective]\n\n    /// Optional, only relevant when using `settings.unreachable` status mode (which is disabled by default).\n    ///\n    /// When `.unreachable` members are allowed, this function MUST be invoked to promote a node into `.dead` state.\n    ///\n    /// In other words, once a `MemberStatusChangedEvent` for an unreachable member has been emitted,\n    /// a higher level system may take additional action and then determine when to actually confirm it dead.\n    /// Systems can implement additional split-brain prevention mechanisms on those layers for example.\n    ///\n    /// Once a node is determined dead by such higher level system, it may invoke `swim.confirmDead(peer: theDefinitelyDeadPeer`,\n    /// to mark the node as dead, with all of its consequences.\n    ///\n    /// - Parameter peer: the peer which should be confirmed 
dead.\n    /// - Returns: `Instance.ConfirmDeadDirective` which must be interpreted by a shell implementation\n    mutating func confirmDead(peer: Peer) -> Instance.ConfirmDeadDirective\n}\n"
  },
  {
    "path": "Sources/SWIM/Settings.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport ClusterMembership\nimport Logging\n\nimport struct Dispatch.DispatchTime\n\n#if canImport(Darwin)\nimport Darwin\n#elseif canImport(Glibc)\nimport Glibc\n#elseif canImport(Musl)\nimport Musl\n#else\n#error(\"Unsupported platform\")\n#endif\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: SWIM Settings\n\nextension SWIM {\n    /// Settings generally applicable to the SWIM implementation as well as any shell running it.\n    public struct Settings {\n        /// Create default settings.\n        public init() {}\n\n        /// Logger used by the instance and shell (unless the specific shell implementation states otherwise).\n        public var logger: Logger = Logger(label: \"swim\")\n\n        /// Convenience setting to change the `logger`'s log level.\n        public var logLevel: Logger.Level {\n            get {\n                self.logger.logLevel\n            }\n            set {\n                self.logger.logLevel = newValue\n            }\n        }\n\n        /// Gossip settings, configures how the protocol period time intervals and gossip characteristics.\n        public var gossip: SWIMGossipSettings = .init()\n\n        /// Settings of the Lifeguard extensions to the SWIM protocol.\n        public var lifeguard: SWIMLifeguardSettings = .init()\n\n        /// Settings for metrics to be emitted by the 
SWIM.Instance automatically.\n        public var metrics: SWIMMetricsSettings = .init()\n\n        /// Configures the node of this SWIM instance explicitly, including allowing setting it's UID.\n        ///\n        /// Depending on runtime, setting this value explicitly may not be necessary,\n        /// as the node can be inferred from the host/port the specific shell is bound to.\n        ///\n        /// If neither, the node could be inferred, or is set explicitly, a fatal crash should be caused by the SWIM shell implementation.\n        public var node: Node?\n\n        /// Number of indirect probes that will be issued once a direct ping probe has failed to reply in time with an ack.\n        ///\n        /// In case of small clusters where nr. of neighbors is smaller than this value, the most neighbors available will\n        /// be asked to issue an indirect probe. E.g. a 3 node cluster, configured with `indirectChecks = 3` has only `1`\n        /// remaining node it can ask for an indirect probe (since 1 node is ourselves, and 1 node is the potentially suspect node itself).\n        public var indirectProbeCount: Int = 3 {\n            willSet {\n                precondition(\n                    newValue >= 0,\n                    \"`indirectChecks` MUST be >= 0. It is recommended to have it be no lower than 3.\"\n                )\n            }\n        }\n\n        /// When a member is \"confirmed dead\" we stop gossiping about it and in order to prevent a node to accidentally\n        /// re-join the cluster by us having fully forgotten about it while it still remains lingering around, we use tombstones.\n        ///\n        /// The time to live configures how long the tombstones are kept around, meaning some accumulating overhead,\n        /// however added safety in case the node \"comes back\". Note that this may be solved on higher level layers\n        /// e.g. 
by forbidding such node to even form a connection to us in a connection-ful implementation, in such case\n        /// lower timeouts are permittable.\n        ///\n        /// Assuming a default of 1 second per protocol period (probe interval), the default value results in 4 hours of delay.\n        public var tombstoneTimeToLiveInTicks: UInt64 =\n            4 * 60 * 60\n\n        /// An interval, as expressed in number of `probeInterval` ticks.\n        ///\n        /// Every so often the additional task of checking the accumulated tombstones for any overdue ones (see `tombstoneTimeToLive`),\n        /// will be performed. Outdated tombstones are then removed. This is done this way to benefit from using a plain Set of the tombstones\n        /// for the checking if a peer has a tombstone or not (O(1), performed frequently), while only having to clean them up periodically (O(n)).\n        public var tombstoneCleanupIntervalInTicks: Int = 5 * 60 {\n            willSet {\n                precondition(newValue > 0, \"`tombstoneCleanupIntervalInTicks` MUST be > 0\")\n            }\n        }\n\n        /// Optional feature: Set of \"initial contact points\" to automatically contact and join upon starting a node\n        ///\n        /// Optionally, a Shell implementation MAY use this setting automatically contact a set of initial contact point nodes,\n        /// allowing a new member to easily join existing clusters (e.g. 
if there is one \"known\" address to contact upon starting).\n        ///\n        /// Consult your Shell implementations of frameworks' documentation if this feature is supported, or handled in alternative ways.\n        /// // TODO: This could be made more generic with \"pluggable\" discovery mechanism.\n        ///\n        /// Note: This is sometimes also referred to \"seed nodes\" and a \"seed node join process\".\n        public var initialContactPoints: Set<ClusterMembership.Node> = []\n\n        /// Interval at which gossip messages should be issued.\n        /// This property sets only a base value of probe interval, which will later be multiplied by `SWIM.Instance.localHealthMultiplier`.\n        /// - SeeAlso: `maxLocalHealthMultiplier`\n        /// Every `interval` a `fan-out` number of gossip messages will be sent.\n        public var probeInterval: Duration = .seconds(1)\n\n        /// Time amount after which a sent ping without ack response is considered timed-out.\n        /// This drives how a node becomes a suspect, by missing such ping/ack rounds.\n        ///\n        /// This property sets only a base timeout value, which is later multiplied by `localHealthMultiplier`\n        /// Note that after an initial ping/ack timeout, secondary indirect probes are issued,\n        /// and only after exceeding `suspicionTimeoutPeriodsMax` shall the node be declared as `.unreachable`,\n        /// which results in an `Cluster.MemberReachabilityChange` `Cluster.Event` which downing strategies may act upon.\n        ///\n        /// - Note: Ping timeouts generally should be set as a multiple of the RTT (round-trip-time) expected in the deployment environment.\n        ///\n        /// - SeeAlso: `SWIMLifeguardSettings.maxLocalHealthMultiplier` which affects the \"effective\" ping timeouts used in runtime.\n        public var pingTimeout: Duration = .milliseconds(300)\n\n        /// Optional SWIM Protocol Extension: `SWIM.MemberStatus.unreachable`\n        
///\n        /// This is a custom extension to the standard SWIM statuses which first moves a member into unreachable state,\n        /// while still trying to ping it, while awaiting for a final \"mark it `.dead` now\" from an external system.\n        ///\n        /// This allows for collaboration between external and internal monitoring systems before committing a node as `.dead`.\n        /// The `.unreachable` state IS gossiped throughout the cluster same as alive/suspect are, while a `.dead` member is not gossiped anymore,\n        /// as it is effectively removed from the membership. This allows for additional spreading of the unreachable observation throughout\n        /// the cluster, as an observation, but not as an action (of removing given member).\n        ///\n        /// The `.unreachable` state therefore from a protocol perspective, is equivalent to a `.suspect` member status.\n        ///\n        /// Unless you _know_ you need un-reachability, do not enable this mode, as it requires additional actions to be taken,\n        /// to confirm a node as dead, complicating the failure detection and node pruning.\n        ///\n        /// By default this option is disabled, and the SWIM implementation behaves same as documented in the papers,\n        /// meaning that when a node remains unresponsive for an exceeded amount of time it is marked as `.dead` immediately.\n        public var unreachability: UnreachabilitySettings = .disabled\n\n        /// Configure how unreachability should be handled by this instance.\n        public enum UnreachabilitySettings {\n            /// Do not use the .unreachable state and just like classic SWIM automatically announce a node as `.dead`,\n            /// if failure detection triggers.\n            ///\n            /// Warning: DO NOT run clusters with mixed reachability settings.\n            ///     In mixed deployments having a single node not understand unreachability will result\n            ///     in it 
promoting an incoming `.unreachable` status to `.dead` and continue spreading this information.\n            ///\n            ///     This can defeat the purpose of unreachability, as it can be used to wait to announce the final `.dead`,\n            ///     move after consulting an external participant, and with a node unaware of unreachability\n            ///     this would short-circuit this \"wait for decision\".\n            case disabled\n            /// Enables the `.unreachable` status extension.\n            /// Most deployments will not need to utilize this mode.\n            ///\n            /// Reachability changes are emitted as `SWIM.MemberStatusChangedEvent` and allow an external participant to\n            /// decide the final `confirmDead` which should be invoked on the swim instance when decided.\n            ///\n            /// For other intents and purposes, unreachable is operationally equivalent to a suspect node,\n            /// in that it MAY return to being alive again.\n            case enabled\n        }\n\n        /// This is not a part of public API. SWIM is using time to schedule pings/calculate timeouts.\n        /// When designing tests one may want to simulate scenarios when events are coming in particular order.\n        /// Doing this will require some control over SWIM's notion of time.\n        ///\n        /// This property allows overriding the `.now()` function for mocking purposes.\n        internal var timeSourceNow: () -> DispatchTime = { () -> DispatchTime in\n            DispatchTime.now()\n        }\n\n        #if TRACELOG_SWIM\n        /// When enabled traces _all_ incoming SWIM protocol communication (remote messages).\n        public var traceLogLevel: Logger.Level? 
= .warning\n        #else\n        /// When enabled traces _all_ incoming SWIM protocol communication (remote messages).\n        public var traceLogLevel: Logger.Level?\n        #endif\n    }\n}\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: SWIM Gossip Settings\n\n/// Settings specific to the gossip payloads used in the SWIM gossip dissemination subsystem.\npublic struct SWIMGossipSettings {\n    /// Create default settings.\n    public init() {}\n\n    /// Limits the number of `GossipPayload`s to be piggy-backed in a single message.\n    ///\n    /// Notes: The Ping/Ack messages are used to piggy-back the gossip information along those messages.\n    /// In order to prevent these messages from growing too large, heuristics or a simple limit must be imposed on them/\n    /// Currently, we limit the message sizes by simply counting how many gossip payloads are allowed to be carried.\n    public var maxNumberOfMessagesPerGossip: Int = 12\n\n    /// Each gossip (i.e. an observation by this specific node of a specific node's specific status),\n    /// is gossiped only a limited number of times, after which the algorithms\n    ///\n    /// - parameters:\n    ///   - gossip: the payload\n    ///   - n: total number of cluster members (including myself), MUST be >= 1 (or will crash)\n    ///\n    /// - SeeAlso: SWIM 4.1. Infection-Style Dissemination Component\n    /// - SeeAlso: SWIM 5. 
Performance Evaluation of a Prototype\n    public func gossipedEnoughTimes(_ gossip: SWIM.Gossip<some SWIMPeer>, members n: Int) -> Bool {\n        precondition(n >= 1, \"number of members MUST be >= 1\")\n        guard n > 1 else {\n            // no need to gossip ever in a single node cluster\n            return false\n        }\n        let maxTimesDouble = self.gossipedEnoughTimesBaseMultiplier * log2(Double(n + 1))\n        return gossip.numberOfTimesGossiped > Int(maxTimesDouble)\n    }\n\n    internal func needsToBeGossipedMoreTimes(_ gossip: SWIM.Gossip<some SWIMPeer>, members n: Int) -> Bool {\n        !self.gossipedEnoughTimes(gossip, members: n)\n    }\n\n    /// Used to adjust the `gossipedEnoughTimes` value.\n    ///\n    /// Should not be lower than 3, since for\n    ///\n    /// - SeeAlso: SWIM 5. Performance Evaluation of a Prototype\n    public var gossipedEnoughTimesBaseMultiplier: Double = 3 {\n        willSet {\n            precondition(newValue > 0, \"number of members MUST be > 0\")\n            self.gossipedEnoughTimesBaseMultiplier = newValue\n        }\n    }\n}\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: SWIM Lifeguard extensions Settings\n\n/// Lifeguard is a set of extensions to SWIM that helps reducing false positive failure detections.\n///\n/// - SeeAlso: [Lifeguard: Local Health Awareness for More Accurate Failure Detection](https://arxiv.org/pdf/1707.00788.pdf)\npublic struct SWIMLifeguardSettings {\n    /// Create default settings.\n    public init() {}\n\n    /// Local health multiplier is a part of Lifeguard extensions to SWIM.\n    /// It will increase local probe interval and probe timeout if the instance is not processing messages in timely manner.\n    /// This property will define the upper limit to local health multiplier.\n    ///\n    /// Must be greater than 0. 
To effectively disable the LHM extension you may set this to `1`.\n    ///\n    /// - SeeAlso: [Lifeguard IV.A. Local Health Multiplier (LHM)](https://arxiv.org/pdf/1707.00788.pdf)\n    public var maxLocalHealthMultiplier: Int = 8 {\n        willSet {\n            precondition(newValue >= 0, \"Local health multiplier MUST BE >= 0\")\n        }\n    }\n\n    /// Suspicion timeouts are specified as number of probe intervals.\n    ///\n    /// E.g. a `suspicionTimeoutMax = .seconds(10)` means that a suspicious node will be escalated as `.unreachable`  at most after approximately 10 seconds. Suspicion timeout will decay logarithmically to `suspicionTimeoutMin`\n    /// with additional suspicions arriving. When no additional suspicions present, suspicion timeout will equal `suspicionTimeoutMax`\n    ///\n    /// ### Modification:\n    /// We introduce an extra state of \"unreachable\" is introduced, which is signalled to a high-level membership implementation,\n    /// which may then confirm it, then leading the SWIM membership to mark the given member as `.dead`. Unlike the original SWIM/Lifeguard\n    /// implementations which proceed to `.dead` automatically. This separation allows running with SWIM failure detection in an \"informational\"\n    /// mode.\n    ///\n    /// Once it is confirmed dead by the high-level membership (e.g. immediately, or after an additional grace period, or vote),\n    /// it will be marked `.dead` in SWIM, and `.down` in the high-level membership.\n    ///\n    /// - SeeAlso: [Lifeguard IV.B. 
Local Health Aware Suspicion (LHA-Suspicion)](https://arxiv.org/pdf/1707.00788.pdf)\n    public var suspicionTimeoutMax: Duration = .seconds(10) {\n        willSet {\n            precondition(\n                newValue.nanoseconds >= self.suspicionTimeoutMin.nanoseconds,\n                \"`suspicionTimeoutMax` MUST BE >= `suspicionTimeoutMin`\"\n            )\n        }\n    }\n\n    /// To ensure the ping origin has time to process a .nack, the indirect ping timeout should always be shorter than the originator's timeout.\n    /// This property controls a multiplier that's applied to `pingTimeout` when calculating indirect probe timeout.\n    /// The default of 80% follows a proposal in the initial paper.\n    /// The value should be between 0 and 1 (exclusive).\n    ///\n    /// - SeeAlso: `pingTimeout`\n    /// - SeeAlso: [Lifeguard IV.B. Local Health Aware Suspicion (LHA-Suspicion)](https://arxiv.org/pdf/1707.00788.pdf)\n    public var indirectPingTimeoutMultiplier: Double = 0.8 {\n        willSet {\n            precondition(newValue > 0, \"Ping timeout multiplier should be > 0\")\n            precondition(newValue < 1, \"Ping timeout multiplier should be < 1\")\n        }\n    }\n\n    /// Suspicion timeouts are specified as number of probe intervals.\n    ///\n    /// E.g. a `suspicionTimeoutMin = .seconds(3)` means that a suspicious node will be escalated as `.unreachable` at least after approximately 3 seconds.\n    /// Suspicion timeout will decay logarithmically from `suspicionTimeoutMax` with additional suspicions arriving.\n    /// When the number of suspicions reaches `maxIndependentSuspicions`, suspicion timeout will equal `suspicionTimeoutMin`\n    ///\n    /// ### Modification:\n    /// An extra state of \"unreachable\" is introduced, which is signalled to a high-level membership implementation,\n    /// which may then confirm it, then leading the SWIM membership to mark the given member as `.dead`. 
Unlike the original SWIM/Lifeguard\n    /// implementations which proceed to `.dead` automatically. This separation allows running with SWIM failure detection in an \"informational\"\n    /// mode.\n    ///\n    /// Once it is confirmed dead by the high-level membership (e.g. immediately, or after an additional grace period, or vote),\n    /// it will be marked `.dead` in swim, and `.down` in the high-level membership.\n    ///\n    /// - SeeAlso: [Lifeguard IV.B. Local Health Aware Suspicion (LHA-Suspicion)](https://arxiv.org/pdf/1707.00788.pdf)\n    public var suspicionTimeoutMin: Duration = .seconds(3) {\n        willSet {\n            precondition(\n                newValue.nanoseconds <= self.suspicionTimeoutMax.nanoseconds,\n                \"`suspicionTimeoutMin` MUST BE <= `suspicionTimeoutMax`\"\n            )\n        }\n    }\n\n    /// A number of independent suspicions required for a suspicion timeout to fully decay to a minimal value.\n    ///\n    /// When set to 1 will effectively disable LHA-suspicion.\n    public var maxIndependentSuspicions = 4 {\n        willSet {\n            precondition(newValue > 0, \"`settings.cluster.swim.maxIndependentSuspicions` MUST BE > 0\")\n        }\n    }\n}\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: SWIM Metrics Settings\n\n/// Configure label names and other details about metrics reported by the `SWIM.Instance`.\npublic struct SWIMMetricsSettings {\n    public init() {}\n\n    /// Configure the segments separator for use when creating labels;\n    /// Some systems like graphite like \".\" as the separator, yet others may not treat this as legal character.\n    ///\n    /// Typical alternative values are \"/\" or \"_\", though consult your metrics backend before changing this setting.\n    public var segmentSeparator: String = \".\"\n\n    /// Prefix all metrics with this segment.\n    ///\n    /// If set, this is used as 
the first part of a label name, followed by `labelPrefix`.\n    public var systemName: String?\n\n    /// Label string prefixed before all emitted metrics names in their labels.\n    ///\n    /// - SeeAlso: `systemName`, if set, is prefixed before `labelPrefix` when creating label names.\n    public var labelPrefix: String? = \"swim\"\n\n    func makeLabel(_ segments: String...) -> String {\n        let systemNamePart: String = self.systemName.map { \"\\($0)\\(self.segmentSeparator)\" } ?? \"\"\n        let systemMetricsPrefixPart: String = self.labelPrefix.map { \"\\($0)\\(self.segmentSeparator)\" } ?? \"\"\n        let joinedSegments = segments.joined(separator: self.segmentSeparator)\n\n        return \"\\(systemNamePart)\\(systemMetricsPrefixPart)\\(joinedSegments)\"\n    }\n}\n"
  },
  {
    "path": "Sources/SWIM/Status.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2018 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport ClusterMembership\n\nextension SWIM {\n    /// The SWIM membership status reflects how a node is perceived by the distributed failure detector.\n    ///\n    /// ### Modification: Unreachable status (opt-in)\n    /// If the unreachable status extension is enabled, it is set / when a classic SWIM implementation would have\n    /// declared a node `.dead`, / yet since we allow for the higher level membership to decide when and how to eject\n    /// members from a cluster, / only the `.unreachable` state is set and an `Cluster.ReachabilityChange` cluster event\n    /// is emitted. / In response to this a high-level membership protocol MAY confirm the node as dead by issuing\n    /// `Instance.confirmDead`, / which will promote the node to `.dead` in SWIM terms.\n    ///\n    /// > The additional `.unreachable` status is only used it enabled explicitly by setting `settings.unreachable`\n    /// > to enabled. Otherwise, the implementation performs its failure checking as usual and directly marks detected\n    /// > to be failed members as `.dead`.\n    ///\n    /// ### Legal transitions:\n    /// - `alive -> suspect`\n    /// - `alive -> suspect`, with next `SWIM.Incarnation`, e.g. 
during flaky network situations, we suspect and un-suspect a node depending on probing\n    /// - `suspect -> unreachable | alive`, if in SWIM terms, a node is \"most likely dead\" we declare it `.unreachable` instead, and await for high-level confirmation to mark it `.dead`.\n    /// - `unreachable -> alive | suspect`, with next `SWIM.Incarnation` optional)\n    /// - `alive | suspect | unreachable -> dead`\n    ///\n    /// - SeeAlso: `SWIM.Incarnation`\n    public enum Status: Hashable, Sendable {\n        /// Indicates an `alive` member of the cluster, i.e. if is reachable and properly replies to all probes on time.\n        case alive(incarnation: Incarnation)\n        /// Indicates a `suspect` member of the cluster, meaning that it did not reply on time to probing and MAY be unreachable.\n        /// Further probing and indirect probing will be performed to test if it really is unreachable/dead,\n        /// or just had a small glitch (or network issues).\n        case suspect(incarnation: Incarnation, suspectedBy: Set<Node>)\n        /// Extension from traditional SWIM states: indicates an unreachable node, under traditional SWIM it would have\n        /// already been marked `.dead`, however unreachability allows for a final extra step including a `swim.confirmDead()`\n        /// call, to move the unreachable node to dead state.\n        ///\n        /// This only matters for multi layer membership protocols which use SWIM as their failure detection mechanism.\n        ///\n        /// This state is DISABLED BY DEFAULT, and if a node receives such unreachable status about another member while\n        /// this setting is disabled it will immediately treat such member as `.dead`. 
Do not run in mixed mode clusters,\n        /// as this can yield unexpected consequences.\n        case unreachable(incarnation: Incarnation)\n        /// Indicates a `dead` member of the cluster, i.e. one that has been confirmed dead and is effectively removed from the membership.\n        /// Note: In the original paper this state was referred to as \"confirm\", which we found slightly confusing, thus the rename.\n        case dead\n    }\n}\n\nextension SWIM.Status: Comparable {\n    public static func < (lhs: SWIM.Status, rhs: SWIM.Status) -> Bool {\n        switch (lhs, rhs) {\n        case (.alive(let selfIncarnation), .alive(let rhsIncarnation)):\n            return selfIncarnation < rhsIncarnation\n        case (.alive(let selfIncarnation), .suspect(let rhsIncarnation, _)):\n            return selfIncarnation <= rhsIncarnation\n        case (.alive(let selfIncarnation), .unreachable(let rhsIncarnation)):\n            return selfIncarnation <= rhsIncarnation\n        case (.suspect(let selfIncarnation, let selfSuspectedBy), .suspect(let rhsIncarnation, let rhsSuspectedBy)):\n            return selfIncarnation < rhsIncarnation\n                || (selfIncarnation == rhsIncarnation && selfSuspectedBy.isStrictSubset(of: rhsSuspectedBy))\n        case (.suspect(let selfIncarnation, _), .alive(let rhsIncarnation)):\n            return selfIncarnation < rhsIncarnation\n        case (.suspect(let selfIncarnation, _), .unreachable(let rhsIncarnation)):\n            return selfIncarnation <= rhsIncarnation\n        case (.unreachable(let selfIncarnation), .alive(let rhsIncarnation)):\n            return selfIncarnation < rhsIncarnation\n        case (.unreachable(let selfIncarnation), .suspect(let rhsIncarnation, _)):\n            return selfIncarnation < rhsIncarnation\n        case (.unreachable(let selfIncarnation), .unreachable(let rhsIncarnation)):\n            return selfIncarnation < rhsIncarnation\n        case (.dead, _):\n            return false\n        case (_, .dead):\n            return true\n        }\n    }\n}\n\nextension SWIM.Status {\n    /// Only `alive` or 
`suspect` or `unreachable` members carry an incarnation number.\n    public var incarnation: SWIM.Incarnation? {\n        switch self {\n        case .alive(let incarnation):\n            return incarnation\n        case .suspect(let incarnation, _):\n            return incarnation\n        case .unreachable(let incarnation):\n            return incarnation\n        case .dead:\n            return nil\n        }\n    }\n\n    /// - Returns: true if the underlying member status is `.alive`, false otherwise.\n    public var isAlive: Bool {\n        switch self {\n        case .alive:\n            return true\n        case .suspect, .unreachable, .dead:\n            return false\n        }\n    }\n\n    /// - Returns: true if the underlying member status is `.suspect`, false otherwise.\n    public var isSuspect: Bool {\n        switch self {\n        case .suspect:\n            return true\n        case .alive, .unreachable, .dead:\n            return false\n        }\n    }\n\n    /// - Returns: true if the underlying member status is `.unreachable`, false otherwise.\n    public var isUnreachable: Bool {\n        switch self {\n        case .unreachable:\n            return true\n        case .alive, .suspect, .dead:\n            return false\n        }\n    }\n\n    /// - Returns: `true` if the underlying member status is `.dead`, false otherwise.\n    public var isDead: Bool {\n        switch self {\n        case .dead:\n            return true\n        case .alive, .suspect, .unreachable:\n            return false\n        }\n    }\n\n    /// - Returns `true` if `self` is greater than or equal to `other` based on the\n    ///   following ordering: `alive(N)` < `suspect(N)` < `alive(N+1)` < `suspect(N+1)` < `dead`\n    public func supersedes(_ other: SWIM.Status) -> Bool {\n        self >= other\n    }\n}\n"
  },
  {
    "path": "Sources/SWIM/Utils/Heap.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the SwiftNIO open source project\n//\n// Copyright (c) 2017-2018 Apple Inc. and the SwiftNIO project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.md for the list of SwiftNIO project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\n// Based on https://raw.githubusercontent.com/apple/swift-nio/bf2598d19359e43b4cfaffaff250986ebe677721/Sources/NIO/Heap.swift\n\n#if canImport(Darwin)\nimport Darwin\n#elseif canImport(Glibc)\nimport Glibc\n#elseif canImport(Musl)\nimport Musl\n#else\n#error(\"Unsupported platform\")\n#endif\n\ninternal enum HeapType {\n    case maxHeap\n    case minHeap\n\n    public func comparator<T: Comparable>(type: T.Type) -> (T, T) -> Bool {\n        switch self {\n        case .maxHeap:\n            return (>)\n        case .minHeap:\n            return (<)\n        }\n    }\n}\n\n/// Slightly modified version of SwiftNIO's Heap, by exposing the comparator.\ninternal struct Heap<T: Equatable> {\n    internal private(set) var storage: ContiguousArray<T> = []\n    private let comparator: (T, T) -> Bool\n\n    init(of type: T.Type = T.self, comparator: @escaping (T, T) -> Bool) {\n        self.comparator = comparator\n    }\n\n    // named `PARENT` in CLRS\n    private func parentIndex(_ i: Int) -> Int {\n        (i - 1) / 2\n    }\n\n    // named `LEFT` in CLRS\n    private func leftIndex(_ i: Int) -> Int {\n        2 * i + 1\n    }\n\n    // named `RIGHT` in CLRS\n    private func rightIndex(_ i: Int) -> Int {\n        2 * i + 2\n    }\n\n    // named `MAX-HEAPIFY` in CLRS\n    private mutating func heapify(_ index: Int) {\n        let left = self.leftIndex(index)\n        let right = self.rightIndex(index)\n\n        var root: Int\n        if left <= 
(self.storage.count - 1), self.comparator(self.storage[left], self.storage[index]) {\n            root = left\n        } else {\n            root = index\n        }\n\n        if right <= (self.storage.count - 1), self.comparator(self.storage[right], self.storage[root]) {\n            root = right\n        }\n\n        if root != index {\n            self.storage.swapAt(index, root)\n            self.heapify(root)\n        }\n    }\n\n    // named `HEAP-INCREASE-KEY` in CRLS\n    private mutating func heapRootify(index: Int, key: T) {\n        var index = index\n        if self.comparator(self.storage[index], key) {\n            fatalError(\"New key must be closer to the root than current key\")\n        }\n\n        self.storage[index] = key\n        while index > 0, self.comparator(self.storage[index], self.storage[self.parentIndex(index)]) {\n            self.storage.swapAt(index, self.parentIndex(index))\n            index = self.parentIndex(index)\n        }\n    }\n\n    public mutating func append(_ value: T) {\n        var i = self.storage.count\n        self.storage.append(value)\n        while i > 0, self.comparator(self.storage[i], self.storage[self.parentIndex(i)]) {\n            self.storage.swapAt(i, self.parentIndex(i))\n            i = self.parentIndex(i)\n        }\n    }\n\n    @discardableResult\n    public mutating func removeRoot() -> T? 
{\n        self.remove(index: 0)\n    }\n\n    @discardableResult\n    public mutating func remove(value: T) -> Bool {\n        if let idx = self.storage.firstIndex(of: value) {\n            self.remove(index: idx)\n            return true\n        } else {\n            return false\n        }\n    }\n\n    @discardableResult\n    public mutating func remove(where: (T) throws -> Bool) rethrows -> Bool {\n        if let idx = try self.storage.firstIndex(where: `where`) {\n            self.remove(index: idx)\n            return true\n        } else {\n            return false\n        }\n    }\n\n    @discardableResult\n    private mutating func remove(index: Int) -> T? {\n        guard self.storage.count > 0 else {\n            return nil\n        }\n        let element = self.storage[index]\n        let comparator = self.comparator\n        if self.storage.count == 1 || self.storage[index] == self.storage[self.storage.count - 1] {\n            self.storage.removeLast()\n        } else if !comparator(self.storage[index], self.storage[self.storage.count - 1]) {\n            self.heapRootify(index: index, key: self.storage[self.storage.count - 1])\n            self.storage.removeLast()\n        } else {\n            self.storage[index] = self.storage[self.storage.count - 1]\n            self.storage.removeLast()\n            self.heapify(index)\n        }\n        return element\n    }\n\n    internal func checkHeapProperty() -> Bool {\n        func checkHeapProperty(index: Int) -> Bool {\n            let li = self.leftIndex(index)\n            let ri = self.rightIndex(index)\n            if index >= self.storage.count {\n                return true\n            } else {\n                let me = self.storage[index]\n                var lCond = true\n                var rCond = true\n                if li < self.storage.count {\n                    let l = self.storage[li]\n                    lCond = !self.comparator(l, me)\n                }\n                if ri < 
self.storage.count {\n                    let r = self.storage[ri]\n                    rCond = !self.comparator(r, me)\n                }\n                return lCond && rCond && checkHeapProperty(index: li) && checkHeapProperty(index: ri)\n            }\n        }\n        return checkHeapProperty(index: 0)\n    }\n}\n\nextension Heap: CustomDebugStringConvertible {\n    var debugDescription: String {\n        guard self.storage.count > 0 else {\n            return \"<empty heap>\"\n        }\n        let descriptions = self.storage.map { String(describing: $0) }\n        let maxLen: Int = descriptions.map { $0.count }.max()!  // storage checked non-empty above\n        let paddedDescs = descriptions.map { (desc: String) -> String in\n            var desc = desc\n            while desc.count < maxLen {\n                if desc.count % 2 == 0 {\n                    desc = \" \\(desc)\"\n                } else {\n                    desc = \"\\(desc) \"\n                }\n            }\n            return desc\n        }\n\n        var all = \"\\n\"\n        let spacing = String(repeating: \" \", count: maxLen)\n        func subtreeWidths(rootIndex: Int) -> (Int, Int) {\n            let lcIdx = self.leftIndex(rootIndex)\n            let rcIdx = self.rightIndex(rootIndex)\n            var leftSpace = 0\n            var rightSpace = 0\n            if lcIdx < self.storage.count {\n                let sws = subtreeWidths(rootIndex: lcIdx)\n                leftSpace += sws.0 + sws.1 + maxLen\n            }\n            if rcIdx < self.storage.count {\n                let sws = subtreeWidths(rootIndex: rcIdx)\n                rightSpace += sws.0 + sws.1 + maxLen\n            }\n            return (leftSpace, rightSpace)\n        }\n        for (index, desc) in paddedDescs.enumerated() {\n            let (leftWidth, rightWidth) = subtreeWidths(rootIndex: index)\n            all += String(repeating: \" \", count: leftWidth)\n            all += desc\n            all += 
String(repeating: \" \", count: rightWidth)\n\n            func height(index: Int) -> Int {\n                Int(log2(Double(index + 1)))\n            }\n            let myHeight = height(index: index)\n            let nextHeight = height(index: index + 1)\n            if myHeight != nextHeight {\n                all += \"\\n\"\n            } else {\n                all += spacing\n            }\n        }\n        all += \"\\n\"\n        return all\n    }\n}\n\nstruct HeapIterator<T: Equatable>: IteratorProtocol {\n    typealias Element = T\n\n    private var heap: Heap<T>\n\n    init(heap: Heap<T>) {\n        self.heap = heap\n    }\n\n    mutating func next() -> T? {\n        self.heap.removeRoot()\n    }\n}\n\nextension Heap: Sequence {\n    typealias Element = T\n\n    var startIndex: Int { self.storage.startIndex }\n    var endIndex: Int { self.storage.endIndex }\n\n    var underestimatedCount: Int {\n        self.storage.count\n    }\n\n    func makeIterator() -> HeapIterator<T> {\n        HeapIterator(heap: self)\n    }\n\n    subscript(position: Int) -> T {\n        self.storage[position]\n    }\n\n    func index(after i: Int) -> Int {\n        i + 1\n    }\n\n    // TODO: document if cheap (AFAICS yes)\n    var count: Int {\n        self.storage.count\n    }\n}\n\nextension Heap where T: Comparable {\n    init?(type: HeapType, storage: ContiguousArray<T>) {\n        self.comparator = type.comparator(type: T.self)\n        self.storage = storage\n        if !self.checkHeapProperty() {\n            return nil\n        }\n    }\n\n    init(type: HeapType) {\n        self.comparator = type.comparator(type: T.self)\n    }\n}\n"
  },
  {
    "path": "Sources/SWIM/Utils/String+Extensions.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: String Interpolation: reflecting:\n\nextension String.StringInterpolation {\n    mutating func appendInterpolation(reflecting subject: Any?) {\n        self.appendLiteral(String(reflecting: subject))\n    }\n\n    mutating func appendInterpolation(reflecting subject: Any) {\n        self.appendLiteral(String(reflecting: subject))\n    }\n}\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: String Interpolation: lineByLine:\n\nextension String.StringInterpolation {\n    mutating func appendInterpolation(lineByLine subject: [Any]) {\n        self.appendLiteral(\"\\n    \\(subject.map { \"\\($0)\" }.joined(separator: \"\\n    \"))\")\n    }\n}\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: String Interpolation: _:orElse:\n\nextension String.StringInterpolation {\n    mutating func appendInterpolation<T>(_ value: T?, orElse defaultValue: String) {\n        self.appendLiteral(\"\\(value.map { \"\\($0)\" } ?? defaultValue)\")\n    }\n\n    mutating func appendInterpolation<T>(optional value: T?) {\n        self.appendLiteral(\"\\(value.map { \"\\($0)\" } ?? \"nil\")\")\n    }\n}\n"
  },
  {
    "path": "Sources/SWIM/Utils/_PrettyLog.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport Logging\n\nimport struct Foundation.Calendar\nimport struct Foundation.Date\nimport class Foundation.DateFormatter\nimport struct Foundation.Locale\n\n/// Pretty log formatter which prints log lines in the following multi line format,\n/// listing every metadata element in it's own, `//`-prefixed, line as well as pretty printing connections if set as `Logger.MetadataValue`.\n///\n/// Example output:\n///\n/// ```\n/// SWIMNIOSample: [2020-08-25 0:7:59.8420] [info] [Example.swift:66] Membership status changed: [udp://127.0.0.1:7001#7015602685756068157] is now [alive(incarnation: 0)]\n//// metadata:\n//// \"swim/member\": udp://127.0.0.1:7001#7015602685756068157\n//// \"swim/member/previousStatus\": unknown\n//// \"swim/member/status\": alive(incarnation: 0)\n/// ```\n///\n/// Pro tip: you may want to use a coloring terminal application, which colors lines prefixed with `//` with a slightly different color,\n/// which makes visually parsing metadata vs. log message lines even more visually pleasing.\npublic struct _SWIMPrettyMetadataLogHandler: LogHandler {\n    let CONSOLE_RESET = \"\\u{001B}[0;0m\"\n    let CONSOLE_BOLD = \"\\u{001B}[1m\"\n\n    let label: String\n\n    /// :nodoc:\n    public init(_ label: String) {\n        self.label = label\n    }\n\n    public subscript(metadataKey _: String) -> Logger.Metadata.Value? 
{\n        get {\n            [:]\n        }\n        set {}\n    }\n\n    public var metadata: Logger.Metadata = [:]\n    public var logLevel: Logger.Level = .trace\n\n    public func log(\n        level: Logger.Level,\n        message: Logger.Message,\n        metadata: Logger.Metadata?,\n        source: String,\n        file: String,\n        function: String,\n        line: UInt\n    ) {\n        var metadataString: String = \"\"\n        if let metadata = metadata {\n            if !metadata.isEmpty {\n                metadataString = \"\\n// metadata:\\n\"\n                for key in metadata.keys.sorted() {\n                    let value: Logger.MetadataValue = metadata[key]!\n                    let valueDescription = self.prettyPrint(metadata: value)\n\n                    var allString = \"\\n// \\\"\\(key)\\\": \\(valueDescription)\"\n                    if allString.contains(\"\\n\") {\n                        allString = String(\n                            allString.split(separator: \"\\n\").map { valueLine in\n                                if valueLine.starts(with: \"// \") {\n                                    return \"\\(valueLine)\\n\"\n                                } else {\n                                    return \"// \\(valueLine)\\n\"\n                                }\n                            }.joined(separator: \"\")\n                        )\n                    }\n                    metadataString.append(allString)\n                }\n                metadataString = String(metadataString.dropLast(1))\n            }\n        }\n        let date = self._createFormatter().string(from: Date())\n        let file = file.split(separator: \"/\").last ?? 
\"\"\n        let line = line\n        print(\n            \"\\(self.CONSOLE_BOLD)\\(self.label)\\(self.CONSOLE_RESET): [\\(date)] [\\(level)] [\\(file):\\(line)] \\(message)\\(metadataString)\"\n        )\n    }\n\n    internal func prettyPrint(metadata: Logger.MetadataValue) -> String {\n        var valueDescription = \"\"\n        switch metadata {\n        case .string(let string):\n            valueDescription = string\n        case .stringConvertible(let convertible):\n            valueDescription = convertible.description\n        case .array(let array):\n            valueDescription = \"\\n  \\(array.map { \"\\($0)\" }.joined(separator: \"\\n  \"))\"\n        case .dictionary(let metadata):\n            for k in metadata.keys {\n                valueDescription += \"\\(CONSOLE_BOLD)\\(k)\\(CONSOLE_RESET): \\(self.prettyPrint(metadata: metadata[k]!))\"\n            }\n        }\n\n        return valueDescription\n    }\n\n    private func _createFormatter() -> DateFormatter {\n        let formatter = DateFormatter()\n        formatter.dateFormat = \"y-MM-dd H:m:ss.SSSS\"\n        formatter.locale = Locale(identifier: \"en_US\")\n        formatter.calendar = Calendar(identifier: .gregorian)\n        return formatter\n    }\n}\n"
  },
  {
    "path": "Sources/SWIM/Utils/time.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nextension Swift.Duration {\n    typealias Value = Int64\n\n    var nanoseconds: Value {\n        let (seconds, attoseconds) = self.components\n        let sNanos = seconds * Value(1_000_000_000)\n        let asNanos = attoseconds / Value(1_000_000_000)\n        let (totalNanos, overflow) = sNanos.addingReportingOverflow(asNanos)\n        return overflow ? .max : totalNanos\n    }\n\n    /// The microseconds representation of the `TimeAmount`.\n    var microseconds: Value {\n        self.nanoseconds / TimeUnit.microseconds.rawValue\n    }\n\n    /// The milliseconds representation of the `TimeAmount`.\n    var milliseconds: Value {\n        self.nanoseconds / TimeUnit.milliseconds.rawValue\n    }\n\n    /// The seconds representation of the `TimeAmount`.\n    var seconds: Value {\n        self.nanoseconds / TimeUnit.seconds.rawValue\n    }\n\n    var isEffectivelyInfinite: Bool {\n        self.nanoseconds == .max\n    }\n\n    /// Represents number of nanoseconds within given time unit\n    enum TimeUnit: Value {\n        case days = 86_400_000_000_000\n        case hours = 3_600_000_000_000\n        case minutes = 60_000_000_000\n        case seconds = 1_000_000_000\n        case milliseconds = 1_000_000\n        case microseconds = 1000\n        case nanoseconds = 1\n\n        var abbreviated: String {\n            switch self {\n            case .nanoseconds: return \"ns\"\n            case 
.microseconds: return \"μs\"\n            case .milliseconds: return \"ms\"\n            case .seconds: return \"s\"\n            case .minutes: return \"m\"\n            case .hours: return \"h\"\n            case .days: return \"d\"\n            }\n        }\n\n        func duration(_ duration: Int) -> Duration {\n            switch self {\n            case .nanoseconds: return .nanoseconds(Value(duration))\n            case .microseconds: return .microseconds(Value(duration))\n            case .milliseconds: return .milliseconds(Value(duration))\n            case .seconds: return .seconds(Value(duration))\n            case .minutes: return .seconds(Value(duration) * 60)\n            case .hours: return .seconds(Value(duration) * 60 * 60)\n            case .days: return .seconds(Value(duration) * 24 * 60 * 60)\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "Sources/SWIMNIOExample/Coding.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020-2022 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport ClusterMembership\nimport NIO\nimport SWIM\n\nimport class Foundation.JSONDecoder\nimport class Foundation.JSONEncoder\n\ntypealias SWIMNIODefaultEncoder = JSONEncoder\ntypealias SWIMNIODefaultDecoder = JSONDecoder\n\nextension SWIM.Message: Codable {\n    public enum DiscriminatorKeys: UInt8, Codable {\n        case ping = 0\n        case pingRequest = 1\n        case response_ack = 2\n        case response_nack = 3\n    }\n\n    public enum CodingKeys: CodingKey {\n        case _case\n        case replyTo\n        case payload\n        case sequenceNumber\n        case incarnation\n        case target\n    }\n\n    public init(from decoder: Decoder) throws {\n        let container = try decoder.container(keyedBy: CodingKeys.self)\n\n        switch try container.decode(DiscriminatorKeys.self, forKey: ._case) {\n        case .ping:\n            let replyTo = try container.decode(SWIM.NIOPeer.self, forKey: .replyTo)\n            let payload = try container.decode(SWIM.GossipPayload<SWIM.NIOPeer>.self, forKey: .payload)\n            let sequenceNumber = try container.decode(SWIM.SequenceNumber.self, forKey: .sequenceNumber)\n            self = .ping(replyTo: replyTo, payload: payload, sequenceNumber: sequenceNumber)\n\n        case .pingRequest:\n            let target = try container.decode(SWIM.NIOPeer.self, forKey: .target)\n            let replyTo = try container.decode(SWIM.NIOPeer.self, 
forKey: .replyTo)\n            let payload = try container.decode(SWIM.GossipPayload<SWIM.NIOPeer>.self, forKey: .payload)\n            let sequenceNumber = try container.decode(SWIM.SequenceNumber.self, forKey: .sequenceNumber)\n            self = .pingRequest(target: target, replyTo: replyTo, payload: payload, sequenceNumber: sequenceNumber)\n\n        case .response_ack:\n            let target = try container.decode(SWIM.NIOPeer.self, forKey: .target)\n            let incarnation = try container.decode(SWIM.Incarnation.self, forKey: .incarnation)\n            let payload = try container.decode(SWIM.GossipPayload<SWIM.NIOPeer>.self, forKey: .payload)\n            let sequenceNumber = try container.decode(SWIM.SequenceNumber.self, forKey: .sequenceNumber)\n            self = .response(\n                .ack(target: target, incarnation: incarnation, payload: payload, sequenceNumber: sequenceNumber)\n            )\n\n        case .response_nack:\n            let target = try container.decode(SWIM.NIOPeer.self, forKey: .target)\n            let sequenceNumber = try container.decode(SWIM.SequenceNumber.self, forKey: .sequenceNumber)\n            self = .response(.nack(target: target, sequenceNumber: sequenceNumber))\n        }\n    }\n\n    public func encode(to encoder: Encoder) throws {\n        var container = encoder.container(keyedBy: CodingKeys.self)\n\n        switch self {\n        case .ping(let replyTo, let payload, let sequenceNumber):\n            try container.encode(DiscriminatorKeys.ping, forKey: ._case)\n            try container.encode(replyTo, forKey: .replyTo)\n            try container.encode(payload, forKey: .payload)\n            try container.encode(sequenceNumber, forKey: .sequenceNumber)\n\n        case .pingRequest(let target, let replyTo, let payload, let sequenceNumber):\n            try container.encode(DiscriminatorKeys.pingRequest, forKey: ._case)\n            try container.encode(target, forKey: .target)\n            try 
container.encode(replyTo, forKey: .replyTo)\n            try container.encode(payload, forKey: .payload)\n            try container.encode(sequenceNumber, forKey: .sequenceNumber)\n\n        case .response(.ack(let target, let incarnation, let payload, let sequenceNumber)):\n            try container.encode(DiscriminatorKeys.response_ack, forKey: ._case)\n            try container.encode(target.swimNode, forKey: .target)\n            try container.encode(incarnation, forKey: .incarnation)\n            try container.encode(payload, forKey: .payload)\n            try container.encode(sequenceNumber, forKey: .sequenceNumber)\n\n        case .response(.nack(let target, let sequenceNumber)):\n            try container.encode(DiscriminatorKeys.response_nack, forKey: ._case)\n            try container.encode(target.swimNode, forKey: .target)\n            try container.encode(sequenceNumber, forKey: .sequenceNumber)\n\n        case .response(let other):\n            fatalError(\"SWIM.Message.response(\\(other)) MUST NOT be serialized, this is a bug, please report an issue.\")\n        }\n    }\n}\n\nextension CodingUserInfoKey {\n    static let channelUserInfoKey = CodingUserInfoKey(rawValue: \"nio_peer_channel\")!\n}\n\nextension SWIM.NIOPeer: Codable {\n    public init(from decoder: Decoder) throws {\n        let container = try decoder.singleValueContainer()\n        let node = try container.decode(Node.self)\n        guard let channel = decoder.userInfo[.channelUserInfoKey] as? 
Channel else {\n            fatalError(\"Expected channelUserInfoKey to be present in userInfo, unable to decode SWIM.NIOPeer!\")\n        }\n        self.init(node: node, channel: channel)\n    }\n\n    public nonisolated func encode(to encoder: Encoder) throws {\n        var container = encoder.singleValueContainer()\n        try container.encode(self.node)\n    }\n}\n\nextension SWIM.Member: Codable {\n    public enum CodingKeys: CodingKey {\n        case node\n        case status\n        case protocolPeriod\n    }\n\n    public init(from decoder: Decoder) throws {\n        let container = try decoder.container(keyedBy: CodingKeys.self)\n        let peer = try container.decode(SWIM.NIOPeer.self, forKey: .node)\n        let status = try container.decode(SWIM.Status.self, forKey: .status)\n        let protocolPeriod = try container.decode(UInt64.self, forKey: .protocolPeriod)\n        // as!-safe, since we only have members of a NIO implementation, so Peer will be NIOPeer\n        self.init(peer: peer as! 
Peer, status: status, protocolPeriod: protocolPeriod, suspicionStartedAt: nil)\n    }\n\n    public func encode(to encoder: Encoder) throws {\n        var container = encoder.container(keyedBy: CodingKeys.self)\n        try container.encode(self.node, forKey: .node)\n        try container.encode(self.protocolPeriod, forKey: .protocolPeriod)\n        try container.encode(self.status, forKey: .status)\n    }\n}\n\nextension ClusterMembership.Node: Codable {\n    // TODO: This implementation has to parse a simplified URI-like representation of a node; need to harden the impl some more\n    public init(from decoder: Decoder) throws {\n        let container = try decoder.singleValueContainer()\n\n        // Repr is expected in format: `protocol://host:port#uid`\n        let repr = try container.decode(String.self)[...]\n        var atIndex = repr.startIndex\n\n        // protocol\n        guard let protocolEndIndex = repr.firstIndex(of: \":\") else {\n            throw SWIMSerializationError.missingField(\"`protocol`, in \\(repr)\", type: \"String\")\n        }\n        atIndex = protocolEndIndex\n        let proto = String(repr[..<atIndex])\n\n        // ://\n        atIndex = repr.index(after: atIndex)\n        guard repr[repr.index(after: atIndex)] == \"/\" else {\n            throw SWIMSerializationError.missingData(\"Node format illegal, was: \\(repr)\")\n        }\n        atIndex = repr.index(after: atIndex)\n        guard repr[repr.index(after: protocolEndIndex)] == \"/\" else {\n            throw SWIMSerializationError.missingData(\"Node format illegal, was: \\(repr)\")\n        }\n        atIndex = repr.index(after: atIndex)\n\n        let name: String?\n        if let nameEndIndex = repr[atIndex...].firstIndex(of: \"@\"), nameEndIndex < repr.endIndex {\n            name = String(repr[atIndex..<nameEndIndex])\n            atIndex = repr.index(after: nameEndIndex)\n        } else {\n            name = nil\n        }\n\n        // host\n        guard let 
hostEndIndex = repr[atIndex...].firstIndex(of: \":\") else {\n            throw SWIMSerializationError.missingData(\"Node format illegal, was: \\(repr), failed at `host` part\")\n        }\n        let host = String(repr[atIndex..<hostEndIndex])\n        atIndex = hostEndIndex\n\n        // :\n        atIndex = repr.index(after: atIndex)\n        // port\n        let portEndIndex = repr[atIndex...].firstIndex(of: \"#\") ?? repr.endIndex\n        guard let port = Int(String(repr[atIndex..<(portEndIndex)])) else {\n            throw SWIMSerializationError.missingData(\"Node format illegal, missing port, was: \\(repr)\")\n        }\n\n        let uid: UInt64?\n        if portEndIndex < repr.endIndex, repr[portEndIndex] == \"#\" {\n            atIndex = repr.index(after: portEndIndex)\n            let uidSubString = repr[atIndex..<repr.endIndex]\n            if uidSubString.isEmpty {\n                uid = nil\n            } else {\n                uid = UInt64(uidSubString)\n            }\n        } else {\n            uid = nil\n        }\n\n        self.init(protocol: proto, name: name, host: host, port: port, uid: uid)\n    }\n\n    public func encode(to encoder: Encoder) throws {\n        var container = encoder.singleValueContainer()\n        var repr = \"\\(self.protocol)://\"\n        if let name = self.name {\n            repr += \"\\(name)@\"\n        }\n        repr.append(\"\\(self.host):\\(self.port)\")\n        if let uid = self.uid {\n            repr.append(\"#\\(uid)\")\n        }\n        try container.encode(repr)\n    }\n}\n\nextension SWIM.GossipPayload: Codable {\n    public init(from decoder: Decoder) throws {\n        let container = try decoder.singleValueContainer()\n        let members: [SWIM.Member<SWIM.NIOPeer>] = try container.decode([SWIM.Member<SWIM.NIOPeer>].self)\n        if members.isEmpty {\n            self = .none\n        } else {\n            self = .membership(members as! [SWIM.Member<Peer>])  // as! 
safe, since we always have Peer == NIOPeer\n        }\n    }\n\n    public func encode(to encoder: Encoder) throws {\n        var container = encoder.singleValueContainer()\n\n        switch self {\n        case .none:\n            let empty: [SWIM.Member<SWIM.NIOPeer>] = []\n            try container.encode(empty)\n\n        case .membership(let members):\n            try container.encode(members)\n        }\n    }\n}\n\nextension SWIM.Status: Codable {\n    public enum DiscriminatorKeys: Int, Codable {\n        case alive\n        case suspect\n        case unreachable\n        case dead\n    }\n\n    public enum CodingKeys: CodingKey {\n        case _status\n        case incarnation\n        case suspectedBy\n    }\n\n    public init(from decoder: Decoder) throws {\n        let container = try decoder.container(keyedBy: CodingKeys.self)\n        switch try container.decode(DiscriminatorKeys.self, forKey: ._status) {\n        case .alive:\n            let incarnation = try container.decode(SWIM.Incarnation.self, forKey: .incarnation)\n            self = .alive(incarnation: incarnation)\n\n        case .suspect:\n            let incarnation = try container.decode(SWIM.Incarnation.self, forKey: .incarnation)\n            let suspectedBy = try container.decode(Set<Node>.self, forKey: .suspectedBy)\n            self = .suspect(incarnation: incarnation, suspectedBy: suspectedBy)\n\n        case .unreachable:\n            let incarnation = try container.decode(SWIM.Incarnation.self, forKey: .incarnation)\n            self = .unreachable(incarnation: incarnation)\n\n        case .dead:\n            self = .dead\n        }\n    }\n\n    public func encode(to encoder: Encoder) throws {\n        var container = encoder.container(keyedBy: CodingKeys.self)\n\n        switch self {\n        case .alive(let incarnation):\n            try container.encode(DiscriminatorKeys.alive, forKey: ._status)\n            try container.encode(incarnation, forKey: .incarnation)\n\n        
case .suspect(let incarnation, let suspectedBy):\n            try container.encode(DiscriminatorKeys.suspect, forKey: ._status)\n            try container.encode(incarnation, forKey: .incarnation)\n            try container.encode(suspectedBy, forKey: .suspectedBy)\n\n        case .unreachable(let incarnation):\n            try container.encode(DiscriminatorKeys.unreachable, forKey: ._status)\n            try container.encode(incarnation, forKey: .incarnation)\n\n        case .dead:\n            try container.encode(DiscriminatorKeys.dead, forKey: ._status)\n        }\n    }\n}\n\n/// Thrown when serialization failed\npublic enum SWIMSerializationError: Error {\n    case notSerializable(String)\n    case missingField(String, type: String)\n    case missingData(String)\n    case unknownEnumValue(Int)\n    case __nonExhaustiveAlwaysIncludeADefaultCaseWhenSwitchingOverTheseErrorsPlease\n}\n"
  },
  {
    "path": "Sources/SWIMNIOExample/Logging.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport ClusterMembership\nimport Logging\nimport NIO\nimport SWIM\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: Tracelog: SWIM [tracelog:SWIM]\n\nextension SWIMNIOShell {\n    /// Optional \"dump all messages\" logging.\n    ///\n    /// Enabled by `SWIM.Settings.traceLogLevel` or `-DTRACELOG_SWIM`\n    func tracelog(\n        _ type: TraceLogType,\n        message: @autoclosure () -> String,\n        file: String = #file,\n        function: String = #function,\n        line: UInt = #line\n    ) {\n        if let level = self.settings.swim.traceLogLevel {\n            self.log.log(\n                level: level,\n                \"[\\(self.myself.node)] \\(type.description) :: \\(message())\",\n                metadata: self.swim.metadata,\n                file: file,\n                function: function,\n                line: line\n            )\n        }\n    }\n\n    internal enum TraceLogType: CustomStringConvertible {\n        case send(to: SWIMAddressablePeer)\n        case reply(to: SWIMAddressablePeer)\n        case receive(pinged: SWIMAddressablePeer?)\n\n        static var receive: TraceLogType {\n            .receive(pinged: nil)\n        }\n\n        var description: String {\n            switch self {\n            case .send(let to):\n                return \"SEND(to:\\(to.swimNode))\"\n            case 
.receive(nil):\n                return \"RECV\"\n            case .receive(let .some(pinged)):\n                return \"RECV(pinged:\\(pinged.swimNode))\"\n            case .reply(let to):\n                return \"REPL(to:\\(to.swimNode))\"\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "Sources/SWIMNIOExample/Message.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport ClusterMembership\nimport Logging\nimport NIO\nimport SWIM\n\nextension SWIM {\n    public enum Message {\n        case ping(replyTo: NIOPeer, payload: GossipPayload<NIOPeer>, sequenceNumber: SWIM.SequenceNumber)\n\n        /// \"Ping Request\" requests a SWIM probe.\n        case pingRequest(\n            target: NIOPeer,\n            replyTo: NIOPeer,\n            payload: GossipPayload<NIOPeer>,\n            sequenceNumber: SWIM.SequenceNumber\n        )\n\n        case response(PingResponse<NIOPeer, NIOPeer>)\n\n        var messageCaseDescription: String {\n            switch self {\n            case .ping(_, _, let nr):\n                return \"ping@\\(nr)\"\n            case .pingRequest(_, _, _, let nr):\n                return \"pingRequest@\\(nr)\"\n            case .response(.ack(_, _, _, let nr)):\n                return \"response/ack@\\(nr)\"\n            case .response(.nack(_, let nr)):\n                return \"response/nack@\\(nr)\"\n            case .response(.timeout(_, _, _, let nr)):\n                // not a \"real message\"\n                return \"response/timeout@\\(nr)\"\n            }\n        }\n\n        /// Responses are special treated, i.e. 
they may trigger a pending completion closure\n        var isResponse: Bool {\n            switch self {\n            case .response:\n                return true\n            default:\n                return false\n            }\n        }\n\n        var sequenceNumber: SWIM.SequenceNumber {\n            switch self {\n            case .ping(_, _, let sequenceNumber):\n                return sequenceNumber\n            case .pingRequest(_, _, _, let sequenceNumber):\n                return sequenceNumber\n            case .response(.ack(_, _, _, let sequenceNumber)):\n                return sequenceNumber\n            case .response(.nack(_, let sequenceNumber)):\n                return sequenceNumber\n            case .response(.timeout(_, _, _, let sequenceNumber)):\n                return sequenceNumber\n            }\n        }\n    }\n\n    public enum LocalMessage {\n        /// Sent by `ClusterShell` when wanting to join a cluster node by `Node`.\n        ///\n        /// Requests SWIM to monitor a node, which also causes an association to this node to be requested\n        /// start gossiping SWIM messages with the node once established.\n        case monitor(Node)\n\n        /// Sent by `ClusterShell` whenever a `cluster.down(node:)` command is issued.\n        ///\n        /// ### Warning\n        /// As both the `SWIMShell` or `ClusterShell` may play the role of origin of a command `cluster.down()`,\n        /// it is important that the `SWIMShell` does NOT issue another `cluster.down()` once a member it already knows\n        /// to be dead is `confirmDead`-ed again, as this would cause an infinite loop of the cluster and SWIM shells\n        /// telling each other about the dead node.\n        ///\n        /// The intended interactions are:\n        /// 1. 
user driven:\n        ///     - user issues `cluster.down(node)`\n        ///     - `ClusterShell` marks the node as `.down` immediately and notifies SWIM with `.confirmDead(node)`\n        ///     - `SWIMShell` updates its failure detection and gossip to mark the node as `.dead`\n        ///     - SWIM continues to gossip this `.dead` information to let other nodes know about this decision;\n        ///       * one case where it may not be able to do so is if the downed node == self node,\n        ///         in which case the system MAY decide to terminate as soon as possible, rather than stick around and tell others that it is leaving.\n        ///         Either scenario is valid, with the \"stick around to tell others we are down/leaving\" being a \"graceful leaving\" scenario.\n        /// 2. failure detector driven, unreachable:\n        ///     - SWIM detects node(s) as potentially dead, rather than marking them `.dead` immediately it marks them as `.unreachable`\n        ///     - it notifies clusterShell with `.unreachable(node)`\n        ///       - the shell updates its `membership` to reflect the reachability status of given `node`; if users subscribe to reachability events,\n        ///         such events are emitted from here\n        ///     - (TODO: this can just be a peer listening to events once we have events subbing) the shell queries `downingProvider` for decision for downing the node\n        ///     - the downing provider MAY invoke `cluster.down()` based on its logic and reachability information\n        ///     - iff `cluster.down(node)` is issued, the same steps as in 1. are taken, leading to the downing of the node in question\n        /// 3. failure detector driven, dead:\n        ///     - SWIM detects `.dead` members in its failure detection gossip (as a result of 1. 
or 2.), immediately marking them `.dead` and invoking `cluster.down(node)`\n        ///     ~ (the following steps are exactly 1., however with pointing out one important decision in the SWIMShell)\n        ///     - `clusterShell` marks the node(s) as `.down`, and as it is the same code path as 1. and 2., also confirms to SWIM that `.confirmDead`\n        ///     - SWIM already knows those nodes are dead, and thus ignores the update, yet may continue to proceed gossiping the `.dead` information,\n        ///       e.g. until all nodes are informed of this fact\n        case confirmDead(Node)\n    }\n}\n"
  },
  {
    "path": "Sources/SWIMNIOExample/NIOPeer.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020-2022 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport ClusterMembership\nimport Logging\nimport NIO\nimport NIOConcurrencyHelpers\nimport SWIM\n\nextension SWIM {\n    /// SWIMPeer designed to deliver messages over UDP in collaboration with the SWIMNIOHandler.\n    public actor NIOPeer: SWIMPeer, SWIMPingOriginPeer, SWIMPingRequestOriginPeer, CustomStringConvertible {\n        public let swimNode: ClusterMembership.Node\n        internal nonisolated var node: ClusterMembership.Node {\n            self.swimNode\n        }\n\n        internal let channel: Channel\n\n        public init(node: Node, channel: Channel) {\n            self.swimNode = node\n            self.channel = channel\n        }\n\n        public func ping(\n            payload: GossipPayload<SWIM.NIOPeer>,\n            from origin: SWIM.NIOPeer,\n            timeout: Swift.Duration,\n            sequenceNumber: SWIM.SequenceNumber\n        ) async throws -> PingResponse<SWIM.NIOPeer, SWIM.NIOPeer> {\n            try await withCheckedThrowingContinuation { continuation in\n                let message = SWIM.Message.ping(replyTo: origin, payload: payload, sequenceNumber: sequenceNumber)\n                let command = SWIMNIOWriteCommand(\n                    message: message,\n                    to: self.swimNode,\n                    replyTimeout: timeout.toNIO,\n                    replyCallback: { reply in\n                        switch reply {\n                        case 
.success(.response(.nack(_, _))):\n                            continuation.resume(\n                                throwing: SWIMNIOIllegalMessageTypeError(\n                                    \"Unexpected .nack reply to .ping message! Was: \\(reply)\"\n                                )\n                            )\n\n                        case .success(.response(let pingResponse)):\n                            assert(\n                                sequenceNumber == pingResponse.sequenceNumber,\n                                \"callback invoked with not matching sequence number! Submitted with \\(sequenceNumber) but invoked with \\(pingResponse.sequenceNumber)!\"\n                            )\n                            continuation.resume(returning: pingResponse)\n\n                        case .failure(let error):\n                            continuation.resume(throwing: error)\n\n                        case .success(let other):\n                            continuation.resume(\n                                throwing:\n                                    SWIMNIOIllegalMessageTypeError(\n                                        \"Unexpected message, got: [\\(other)]:\\(reflecting: type(of: other)) while expected \\(PingResponse<SWIM.NIOPeer, SWIM.NIOPeer>.self)\"\n                                    )\n                            )\n                        }\n                    }\n                )\n\n                self.channel.writeAndFlush(command, promise: nil)\n            }\n        }\n\n        public func pingRequest(\n            target: SWIM.NIOPeer,\n            payload: GossipPayload<SWIM.NIOPeer>,\n            from origin: SWIM.NIOPeer,\n            timeout: Duration,\n            sequenceNumber: SWIM.SequenceNumber\n        ) async throws -> PingResponse<SWIM.NIOPeer, SWIM.NIOPeer> {\n            try await withCheckedThrowingContinuation { continuation in\n                let message = SWIM.Message.pingRequest(\n                    
target: target,\n                    replyTo: origin,\n                    payload: payload,\n                    sequenceNumber: sequenceNumber\n                )\n                let command = SWIMNIOWriteCommand(\n                    message: message,\n                    to: self.node,\n                    replyTimeout: timeout.toNIO,\n                    replyCallback: { reply in\n                        switch reply {\n                        case .success(.response(let pingResponse)):\n                            assert(\n                                sequenceNumber == pingResponse.sequenceNumber,\n                                \"callback invoked with not matching sequence number! Submitted with \\(sequenceNumber) but invoked with \\(pingResponse.sequenceNumber)!\"\n                            )\n                            continuation.resume(returning: pingResponse)\n\n                        case .failure(let error):\n                            continuation.resume(throwing: error)\n\n                        case .success(let other):\n                            continuation.resume(\n                                throwing: SWIMNIOIllegalMessageTypeError(\n                                    \"Unexpected message, got: \\(other) while expected \\(PingResponse<SWIM.NIOPeer, SWIM.NIOPeer>.self)\"\n                                )\n                            )\n                        }\n                    }\n                )\n\n                self.channel.writeAndFlush(command, promise: nil)\n            }\n        }\n\n        public func ack(\n            acknowledging sequenceNumber: SWIM.SequenceNumber,\n            target: SWIM.NIOPeer,\n            incarnation: Incarnation,\n            payload: GossipPayload<SWIM.NIOPeer>\n        ) {\n            let message = SWIM.Message.response(\n                .ack(target: target, incarnation: incarnation, payload: payload, sequenceNumber: sequenceNumber)\n            )\n            let command = 
SWIMNIOWriteCommand(\n                message: message,\n                to: self.node,\n                replyTimeout: .seconds(0),\n                replyCallback: nil\n            )\n\n            self.channel.writeAndFlush(command, promise: nil)\n        }\n\n        public func nack(\n            acknowledging sequenceNumber: SWIM.SequenceNumber,\n            target: SWIM.NIOPeer\n        ) {\n            let message = SWIM.Message.response(.nack(target: target, sequenceNumber: sequenceNumber))\n            let command = SWIMNIOWriteCommand(\n                message: message,\n                to: self.node,\n                replyTimeout: .seconds(0),\n                replyCallback: nil\n            )\n\n            self.channel.writeAndFlush(command, promise: nil)\n        }\n\n        public nonisolated var description: String {\n            \"NIOPeer(\\(self.node))\"\n        }\n    }\n}\n\nextension SWIM.NIOPeer: Hashable {\n    public nonisolated func hash(into hasher: inout Hasher) {\n        self.node.hash(into: &hasher)\n    }\n\n    public static func == (lhs: SWIM.NIOPeer, rhs: SWIM.NIOPeer) -> Bool {\n        lhs.node == rhs.node\n    }\n}\n\npublic struct SWIMNIOTimeoutError: Error, CustomStringConvertible {\n    let timeout: Duration\n    let message: String\n\n    init(timeout: NIO.TimeAmount, message: String) {\n        self.timeout = .nanoseconds(Int(timeout.nanoseconds))\n        self.message = message\n    }\n\n    init(timeout: Duration, message: String) {\n        self.timeout = timeout\n        self.message = message\n    }\n\n    public var description: String {\n        \"SWIMNIOTimeoutError(timeout: \\(self.timeout.prettyDescription), \\(self.message))\"\n    }\n}\n\npublic struct SWIMNIOIllegalMessageTypeError: Error, CustomStringConvertible {\n    let message: String\n\n    init(_ message: String) {\n        self.message = message\n    }\n\n    public var description: String {\n        
\"SWIMNIOIllegalMessageTypeError(\\(self.message))\"\n    }\n}\n"
  },
  {
    "path": "Sources/SWIMNIOExample/SWIMNIOHandler.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport ClusterMembership\nimport CoreMetrics\nimport Logging\nimport NIO\nimport NIOFoundationCompat\nimport SWIM\n\nimport struct Dispatch.DispatchTime\n\n#if canImport(FoundationEssentials)\nimport FoundationEssentials\n#else\nimport Foundation\n#endif\n\n/// `ChannelDuplexHandler` responsible for encoding/decoding SWIM messages to/from the `SWIMNIOShell`.\n///\n/// It is designed to work with `DatagramBootstrap`s, and the contained shell can send messages by writing `SWIMNIOSWIMNIOWriteCommand`\n/// data into the channel which this handler converts into outbound `AddressedEnvelope<ByteBuffer>` elements.\npublic final class SWIMNIOHandler: ChannelDuplexHandler {\n    public typealias InboundIn = AddressedEnvelope<ByteBuffer>\n    public typealias InboundOut = SWIM.MemberStatusChangedEvent<SWIM.NIOPeer>\n    public typealias OutboundIn = SWIMNIOWriteCommand\n    public typealias OutboundOut = AddressedEnvelope<ByteBuffer>\n\n    let settings: SWIMNIO.Settings\n    var log: Logger {\n        self.settings.logger\n    }\n\n    // initialized in channelActive\n    var shell: SWIMNIOShell!\n    var metrics: SWIM.Metrics.ShellMetrics?\n\n    var pendingReplyCallbacks: [PendingResponseCallbackIdentifier: (Result<SWIM.Message, Error>) -> Void]\n\n    public init(settings: SWIMNIO.Settings) {\n        self.settings = settings\n        self.pendingReplyCallbacks = [:]\n    }\n\n    public func 
channelActive(context: ChannelHandlerContext) {\n        guard let hostIP = context.channel.localAddress!.ipAddress else {\n            fatalError(\"SWIM requires a known host IP, but was nil! Channel: \\(context.channel)\")\n        }\n        guard let hostPort = context.channel.localAddress!.port else {\n            fatalError(\"SWIM requires a known host IP, but was nil! Channel: \\(context.channel)\")\n        }\n\n        var settings = self.settings\n        let node =\n            self.settings.swim.node\n            ?? Node(protocol: \"udp\", host: hostIP, port: hostPort, uid: .random(in: 0..<UInt64.max))\n        settings.swim.node = node\n        self.shell = SWIMNIOShell(\n            node: node,\n            settings: settings,\n            channel: context.channel,\n            onMemberStatusChange: { change in\n                context.eventLoop.execute {\n                    let wrapped = self.wrapInboundOut(change)\n                    context.fireChannelRead(wrapped)\n                }\n            }\n        )\n        self.metrics = self.shell.swim.metrics.shell\n\n        self.log.trace(\n            \"Channel active\",\n            metadata: [\n                \"nio/localAddress\": \"\\(context.channel.localAddress?.description ?? \"unknown\")\"\n            ]\n        )\n    }\n\n    public func channelUnregistered(context: ChannelHandlerContext) {\n        self.shell.receiveShutdown()\n        context.fireChannelUnregistered()\n    }\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: Write Messages\n\n    public func write(context: ChannelHandlerContext, data: NIOAny, promise: EventLoopPromise<Void>?) 
{\n        let writeCommand = self.unwrapOutboundIn(data)\n\n        self.log.trace(\n            \"Write command: \\(writeCommand.message.messageCaseDescription)\",\n            metadata: [\n                \"write/message\": \"\\(writeCommand.message)\",\n                \"write/recipient\": \"\\(writeCommand.recipient)\",\n                \"write/reply-timeout\": \"\\(writeCommand.replyTimeout)\",\n            ]\n        )\n\n        do {\n            // TODO: note that this impl does not handle \"new node on same host/port\" yet\n\n            // register and manage reply callback ------------------------------\n            if let replyCallback = writeCommand.replyCallback {\n                let sequenceNumber = writeCommand.message.sequenceNumber\n                #if DEBUG\n                let callbackKey = PendingResponseCallbackIdentifier(\n                    peerAddress: writeCommand.recipient,\n                    sequenceNumber: sequenceNumber,\n                    inResponseTo: writeCommand.message\n                )\n                #else\n                let callbackKey = PendingResponseCallbackIdentifier(\n                    peerAddress: writeCommand.recipient,\n                    sequenceNumber: sequenceNumber\n                )\n                #endif\n\n                let timeoutTask = context.eventLoop.scheduleTask(in: writeCommand.replyTimeout) {\n                    if let callback = self.pendingReplyCallbacks.removeValue(forKey: callbackKey) {\n                        callback(\n                            .failure(\n                                SWIMNIOTimeoutError(\n                                    timeout: writeCommand.replyTimeout,\n                                    message:\n                                        \"Timeout of [\\(callbackKey)], no reply to [\\(writeCommand.message.messageCaseDescription)] after \\(writeCommand.replyTimeout.prettyDescription())\"\n                                )\n                            
)\n                        )\n                    }  // else, task fired already (should have been removed)\n                }\n\n                self.log.trace(\n                    \"Store callback: \\(callbackKey)\",\n                    metadata: [\n                        \"message\": \"\\(writeCommand.message)\",\n                        \"pending/callbacks\": Logger.MetadataValue.array(self.pendingReplyCallbacks.map { \"\\($0)\" }),\n                    ]\n                )\n                self.pendingReplyCallbacks[callbackKey] = { reply in\n                    timeoutTask.cancel()  // when we trigger the callback, we should also cancel the timeout task\n                    replyCallback(reply)  // successful reply received\n                }\n            }\n\n            // serialize & send message ----------------------------------------\n            let buffer = try self.serialize(message: writeCommand.message, using: context.channel.allocator)\n            let envelope = AddressedEnvelope(remoteAddress: writeCommand.recipient, data: buffer)\n\n            context.writeAndFlush(self.wrapOutboundOut(envelope), promise: promise)\n        } catch {\n            self.log.warning(\n                \"Write failed\",\n                metadata: [\n                    \"error\": \"\\(error)\"\n                ]\n            )\n        }\n    }\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: Read Messages\n\n    public func channelRead(context: ChannelHandlerContext, data: NIOAny) {\n        let addressedEnvelope: AddressedEnvelope<ByteBuffer> = self.unwrapInboundIn(data)\n        let remoteAddress = addressedEnvelope.remoteAddress\n\n        do {\n            // deserialize ----------------------------------------\n            let message = try self.deserialize(addressedEnvelope.data, channel: context.channel)\n\n            self.log.trace(\n                \"Read 
successful: \\(message.messageCaseDescription)\",\n                metadata: [\n                    \"remoteAddress\": \"\\(remoteAddress)\",\n                    \"swim/message/type\": \"\\(message.messageCaseDescription)\",\n                    \"swim/message\": \"\\(message)\",\n                ]\n            )\n\n            if message.isResponse {\n                // if it's a reply, invoke the pending callback ------\n                // TODO: move into the shell: https://github.com/apple/swift-cluster-membership/issues/41\n                #if DEBUG\n                let callbackKey = PendingResponseCallbackIdentifier(\n                    peerAddress: remoteAddress,\n                    sequenceNumber: message.sequenceNumber,\n                    inResponseTo: nil\n                )\n                #else\n                let callbackKey = PendingResponseCallbackIdentifier(\n                    peerAddress: remoteAddress,\n                    sequenceNumber: message.sequenceNumber\n                )\n                #endif\n\n                if let index = self.pendingReplyCallbacks.index(forKey: callbackKey) {\n                    let (storedKey, callback) = self.pendingReplyCallbacks.remove(at: index)\n                    // TODO: UIDs of nodes matter\n                    self.log.trace(\n                        \"Received response, key: \\(callbackKey); Invoking callback...\",\n                        metadata: [\n                            \"pending/callbacks\": Logger.MetadataValue.array(self.pendingReplyCallbacks.map { \"\\($0)\" })\n                        ]\n                    )\n                    self.metrics?.pingResponseTime.recordNanoseconds(\n                        storedKey.nanosecondsSinceCallbackStored().nanoseconds\n                    )\n                    callback(.success(message))\n                } else {\n                    self.log.trace(\n                        \"No callback for \\(callbackKey); It may have been removed due to 
a timeout already.\",\n                        metadata: [\n                            \"pending callbacks\": Logger.MetadataValue.array(self.pendingReplyCallbacks.map { \"\\($0)\" })\n                        ]\n                    )\n                }\n            } else {\n                // deliver to the shell ------------------------------\n                self.shell.receiveMessage(message: message)\n            }\n        } catch {\n            self.log.error(\n                \"Read failed: \\(error)\",\n                metadata: [\n                    \"remoteAddress\": \"\\(remoteAddress)\",\n                    \"message/bytes/count\": \"\\(addressedEnvelope.data.readableBytes)\",\n                    \"error\": \"\\(error)\",\n                ]\n            )\n        }\n    }\n\n    public func errorCaught(context: ChannelHandlerContext, error: Error) {\n        self.log.error(\n            \"Error caught: \\(error)\",\n            metadata: [\n                \"nio/channel\": \"\\(context.channel)\",\n                \"swim/shell\": \"\\(self.shell, orElse: \"nil\")\",\n                \"error\": \"\\(error)\",\n            ]\n        )\n    }\n}\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: Serialization\n\nextension SWIMNIOHandler {\n    private func deserialize(_ bytes: ByteBuffer, channel: Channel) throws -> SWIM.Message {\n        var bytes = bytes\n        guard let data = bytes.readData(length: bytes.readableBytes) else {\n            throw MissingDataError(\"No data to read\")\n        }\n\n        self.metrics?.messageInboundCount.increment()\n        self.metrics?.messageInboundBytes.record(data.count)\n\n        let decoder = SWIMNIODefaultDecoder()\n        decoder.userInfo[.channelUserInfoKey] = channel\n        return try decoder.decode(SWIM.Message.self, from: data)\n    }\n\n    private func serialize(message: SWIM.Message, using allocator: 
ByteBufferAllocator) throws -> ByteBuffer {\n        let encoder = SWIMNIODefaultEncoder()\n        let data = try encoder.encode(message)\n\n        self.metrics?.messageOutboundCount.increment()\n        self.metrics?.messageOutboundBytes.record(data.count)\n\n        let buffer = data.withUnsafeBytes { bytes -> ByteBuffer in\n            var buffer = allocator.buffer(capacity: data.count)\n            buffer.writeBytes(bytes)\n            return buffer\n        }\n        return buffer\n    }\n}\n\n/// Used to send a command to the channel pipeline to write the message,\n/// and install a reply handler for the specific sequence number associated with the message (along with a timeout)\n/// when a callback is provided.\npublic struct SWIMNIOWriteCommand {\n    /// SWIM message to be written.\n    public let message: SWIM.Message\n    /// Address of recipient peer where the message should be written to.\n    public let recipient: SocketAddress\n\n    /// If the `replyCallback` is set, what timeout should be set for a reply to come back from the peer.\n    public let replyTimeout: NIO.TimeAmount\n    /// Callback to be invoked (calling into the SWIMNIOShell) when a reply to this message arrives.\n    public let replyCallback: ((Result<SWIM.Message, Error>) -> Void)?\n\n    /// Create a write command.\n    public init(\n        message: SWIM.Message,\n        to recipient: Node,\n        replyTimeout: TimeAmount,\n        replyCallback: ((Result<SWIM.Message, Error>) -> Void)?\n    ) {\n        self.message = message\n        self.recipient = try! 
.init(ipAddress: recipient.host, port: recipient.port)  // try!-safe since the host/port is always safe\n        self.replyTimeout = replyTimeout\n        self.replyCallback = replyCallback\n    }\n}\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: Callback storage\n\n// TODO: move callbacks into the shell?\nstruct PendingResponseCallbackIdentifier: Hashable, CustomStringConvertible {\n    let peerAddress: SocketAddress  // FIXME: UID as well...?\n    let sequenceNumber: SWIM.SequenceNumber\n\n    let storedAt: DispatchTime = .now()\n\n    #if DEBUG\n    let inResponseTo: SWIM.Message?\n    #endif\n\n    func hash(into hasher: inout Hasher) {\n        hasher.combine(peerAddress)\n        hasher.combine(sequenceNumber)\n    }\n\n    static func == (lhs: PendingResponseCallbackIdentifier, rhs: PendingResponseCallbackIdentifier) -> Bool {\n        lhs.peerAddress == rhs.peerAddress && lhs.sequenceNumber == rhs.sequenceNumber\n    }\n\n    var description: String {\n        \"\"\"\n        PendingResponseCallbackIdentifier(\\\n        peerAddress: \\(peerAddress), \\\n        sequenceNumber: \\(sequenceNumber), \\\n        storedAt: \\(self.storedAt) (\\(nanosecondsSinceCallbackStored()) ago)\\\n        )\n        \"\"\"\n    }\n\n    func nanosecondsSinceCallbackStored(now: DispatchTime = .now()) -> Duration {\n        Duration.nanoseconds(Int(now.uptimeNanoseconds - storedAt.uptimeNanoseconds))\n    }\n}\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: Errors\n\nstruct MissingDataError: Error {\n    let message: String\n    init(_ message: String) {\n        self.message = message\n    }\n}\n"
  },
  {
    "path": "Sources/SWIMNIOExample/SWIMNIOShell.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020-2022 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport ClusterMembership\nimport Logging\nimport Metrics\nimport NIO\nimport SWIM\n\nimport struct Dispatch.DispatchTime\n\n/// The SWIM shell is responsible for driving all interactions of the `SWIM.Instance` with the outside world.\n///\n/// - Warning: Take care to only interact with the shell through `receive...` prefixed functions, as they ensure that\n/// all operations performed on the shell are properly synchronized by hopping to the right event loop.\n///\n/// - SeeAlso: `SWIM.Instance` for detailed documentation about the SWIM protocol implementation.\npublic final class SWIMNIOShell {\n    var swim: SWIM.Instance<SWIM.NIOPeer, SWIM.NIOPeer, SWIM.NIOPeer>!\n\n    let settings: SWIMNIO.Settings\n    var log: Logger {\n        self.settings.logger\n    }\n\n    let eventLoop: EventLoop\n    let channel: Channel\n\n    let myself: SWIM.NIOPeer\n    public var peer: SWIM.NIOPeer {\n        self.myself\n    }\n\n    let onMemberStatusChange: (SWIM.MemberStatusChangedEvent<SWIM.NIOPeer>) -> Void\n\n    public var node: Node {\n        self.myself.node\n    }\n\n    /// Cancellable of the periodicPingTimer (if it was kicked off)\n    private var nextPeriodicTickCancellable: SWIMCancellable?\n\n    internal init(\n        node: Node,\n        settings: SWIMNIO.Settings,\n        channel: Channel,\n        onMemberStatusChange: @escaping (SWIM.MemberStatusChangedEvent<SWIM.NIOPeer>) -> Void\n    ) 
{\n        self.settings = settings\n\n        self.channel = channel\n        self.eventLoop = channel.eventLoop\n\n        let myself = SWIM.NIOPeer(node: node, channel: channel)\n        self.myself = myself\n        self.swim = SWIM.Instance(settings: settings.swim, myself: myself)\n\n        self.onMemberStatusChange = onMemberStatusChange\n        self.onStart(startPeriodicPingTimer: settings._startPeriodicPingTimer)\n    }\n\n    /// Initialize timers and other after-initialized tasks\n    private func onStart(startPeriodicPingTimer: Bool) {\n        // Immediately announce that \"we\" are alive\n        self.announceMembershipChange(.init(previousStatus: nil, member: self.swim.member))\n\n        // Immediately attempt to connect to initial contact points\n        self.settings.swim.initialContactPoints.forEach { node in\n            self.receiveStartMonitoring(node: node)\n        }\n\n        if startPeriodicPingTimer {\n            // Kick off timer for periodically pinging random cluster member (i.e. the periodic Gossip)\n            self.handlePeriodicProtocolPeriodTick()\n        }\n    }\n\n    /// Receive a shutdown signal and initiate the termination of the shell along with the swim protocol instance.\n    ///\n    /// Upon shutdown the myself member is marked as `.dead`, although it should not be expected to spread this\n    /// information to other nodes. 
It technically could, but it is not expected nor required to.\n    public func receiveShutdown() {\n        guard self.eventLoop.inEventLoop else {\n            return self.eventLoop.execute {\n                self.receiveShutdown()\n            }\n        }\n\n        self.nextPeriodicTickCancellable?.cancel()\n        switch self.swim.confirmDead(peer: self.peer) {\n        case .applied(let change):\n            self.tryAnnounceMemberReachability(change: change)\n            self.log.info(\"\\(Self.self) shutdown\")\n        case .ignored:\n            ()  // ok\n        }\n    }\n\n    /// Start a *single* timer, to run the passed task after given delay.\n    @discardableResult\n    private func schedule(delay: Duration, _ task: @escaping () -> Void) -> SWIMCancellable {\n        self.eventLoop.assertInEventLoop()\n\n        let scheduled: Scheduled<Void> = self.eventLoop.scheduleTask(in: delay.toNIO) { () in task() }\n        return SWIMCancellable { scheduled.cancel() }\n    }\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: Receiving messages\n\n    public func receiveMessage(message: SWIM.Message) {\n        guard self.eventLoop.inEventLoop else {\n            return self.eventLoop.execute {\n                self.receiveMessage(message: message)\n            }\n        }\n\n        self.tracelog(.receive, message: \"\\(message)\")\n\n        switch message {\n        case .ping(let replyTo, let payload, let sequenceNumber):\n            self.receivePing(pingOrigin: replyTo, payload: payload, sequenceNumber: sequenceNumber)\n\n        case .pingRequest(let target, let pingRequestOrigin, let payload, let sequenceNumber):\n            self.receivePingRequest(\n                target: target,\n                pingRequestOrigin: pingRequestOrigin,\n                payload: payload,\n                sequenceNumber: sequenceNumber\n            )\n\n        case .response(let 
pingResponse):\n            self.receivePingResponse(response: pingResponse, pingRequestOriginPeer: nil, pingRequestSequenceNumber: nil)\n        }\n    }\n\n    /// Allows for typical local interactions with the shell\n    public func receiveLocalMessage(message: SWIM.LocalMessage) {\n        guard self.eventLoop.inEventLoop else {\n            return self.eventLoop.execute {\n                self.receiveLocalMessage(message: message)\n            }\n        }\n\n        self.tracelog(.receive, message: \"\\(message)\")\n\n        switch message {\n        case .monitor(let node):\n            self.receiveStartMonitoring(node: node)\n\n        case .confirmDead(let node):\n            self.receiveConfirmDead(deadNode: node)\n        }\n    }\n\n    private func receivePing(\n        pingOrigin: SWIM.NIOPeer,\n        payload: SWIM.GossipPayload<SWIM.NIOPeer>,\n        sequenceNumber: SWIM.SequenceNumber\n    ) {\n        guard self.eventLoop.inEventLoop else {\n            return self.eventLoop.execute {\n                self.receivePing(pingOrigin: pingOrigin, payload: payload, sequenceNumber: sequenceNumber)\n            }\n        }\n\n        self.log.trace(\n            \"Received ping@\\(sequenceNumber)\",\n            metadata: self.swim.metadata([\n                \"swim/ping/pingOrigin\": \"\\(pingOrigin.swimNode)\",\n                \"swim/ping/payload\": \"\\(payload)\",\n                \"swim/ping/seqNr\": \"\\(sequenceNumber)\",\n            ])\n        )\n\n        let directives: [SWIM.Instance.PingDirective] = self.swim.onPing(\n            pingOrigin: pingOrigin.peer(self.channel),\n            payload: payload,\n            sequenceNumber: sequenceNumber\n        )\n        directives.forEach { directive in\n            switch directive {\n            case .gossipProcessed(let gossipDirective):\n                self.handleGossipPayloadProcessedDirective(gossipDirective)\n\n            case .sendAck(let pingOrigin, let pingedTarget, let 
incarnation, let payload, let sequenceNumber):\n                self.tracelog(.reply(to: pingOrigin), message: \"\\(directive)\")\n                Task {\n                    await pingOrigin.peer(self.channel).ack(\n                        acknowledging: sequenceNumber,\n                        target: pingedTarget,\n                        incarnation: incarnation,\n                        payload: payload\n                    )\n                }\n            }\n        }\n    }\n\n    private func receivePingRequest(\n        target: SWIM.NIOPeer,\n        pingRequestOrigin: SWIM.NIOPeer,\n        payload: SWIM.GossipPayload<SWIM.NIOPeer>,\n        sequenceNumber: SWIM.SequenceNumber\n    ) {\n        guard self.eventLoop.inEventLoop else {\n            return self.eventLoop.execute {\n                self.receivePingRequest(\n                    target: target,\n                    pingRequestOrigin: pingRequestOrigin,\n                    payload: payload,\n                    sequenceNumber: sequenceNumber\n                )\n            }\n        }\n\n        self.log.trace(\n            \"Received pingRequest\",\n            metadata: [\n                \"swim/pingRequest/origin\": \"\\(pingRequestOrigin.node)\",\n                \"swim/pingRequest/sequenceNumber\": \"\\(sequenceNumber)\",\n                \"swim/target\": \"\\(target.node)\",\n                \"swim/gossip/payload\": \"\\(payload)\",\n            ]\n        )\n\n        let directives = self.swim.onPingRequest(\n            target: target,\n            pingRequestOrigin: pingRequestOrigin,\n            payload: payload,\n            sequenceNumber: sequenceNumber\n        )\n        directives.forEach { directive in\n            switch directive {\n            case .gossipProcessed(let gossipDirective):\n                self.handleGossipPayloadProcessedDirective(gossipDirective)\n\n            case .sendPing(\n                let target,\n                let payload,\n                let 
pingRequestOriginPeer,\n                let pingRequestSequenceNumber,\n                let timeout,\n                let sequenceNumber\n            ):\n                Task {\n                    await self.sendPing(\n                        to: target,\n                        payload: payload,\n                        pingRequestOrigin: pingRequestOriginPeer,\n                        pingRequestSequenceNumber: pingRequestSequenceNumber,\n                        timeout: timeout,\n                        sequenceNumber: sequenceNumber\n                    )\n                }\n            }\n        }\n    }\n\n    ///   - pingRequestOrigin: is set only when the ping that this is a reply to was originated as a `pingRequest`.\n    func receivePingResponse(\n        response: SWIM.PingResponse<SWIM.NIOPeer, SWIM.NIOPeer>,\n        pingRequestOriginPeer: SWIM.NIOPeer?,\n        pingRequestSequenceNumber: SWIM.SequenceNumber?\n    ) {\n        guard self.eventLoop.inEventLoop else {\n            return self.eventLoop.execute {\n                self.receivePingResponse(\n                    response: response,\n                    pingRequestOriginPeer: pingRequestOriginPeer,\n                    pingRequestSequenceNumber: pingRequestSequenceNumber\n                )\n            }\n        }\n\n        self.log.trace(\n            \"Receive ping response: \\(response)\",\n            metadata: self.swim.metadata([\n                \"swim/pingRequest/origin\": \"\\(pingRequestOriginPeer, orElse: \"nil\")\",\n                \"swim/pingRequest/sequenceNumber\": \"\\(pingRequestSequenceNumber, orElse: \"nil\")\",\n                \"swim/response\": \"\\(response)\",\n                \"swim/response/sequenceNumber\": \"\\(response.sequenceNumber)\",\n            ])\n        )\n\n        let directives = self.swim.onPingResponse(\n            response: response,\n            pingRequestOrigin: pingRequestOriginPeer,\n            pingRequestSequenceNumber: 
pingRequestSequenceNumber\n        )\n        // optionally debug log all directives here\n        directives.forEach { directive in\n            switch directive {\n            case .gossipProcessed(let gossipDirective):\n                self.handleGossipPayloadProcessedDirective(gossipDirective)\n\n            case .sendAck(let pingRequestOrigin, let acknowledging, let target, let incarnation, let payload):\n                Task {\n                    await pingRequestOrigin.ack(\n                        acknowledging: acknowledging,\n                        target: target,\n                        incarnation: incarnation,\n                        payload: payload\n                    )\n                }\n\n            case .sendNack(let pingRequestOrigin, let acknowledging, let target):\n                Task {\n                    await pingRequestOrigin.nack(acknowledging: acknowledging, target: target)\n                }\n\n            case .sendPingRequests(let pingRequestDirective):\n                Task {\n                    await self.sendPingRequests(pingRequestDirective)\n                }\n            }\n        }\n    }\n\n    func receiveEveryPingRequestResponse(\n        result: SWIM.PingResponse<SWIM.NIOPeer, SWIM.NIOPeer>,\n        pingedPeer: SWIM.NIOPeer\n    ) {\n        guard self.eventLoop.inEventLoop else {\n            return self.eventLoop.execute {\n                self.receiveEveryPingRequestResponse(result: result, pingedPeer: pingedPeer)\n            }\n        }\n        self.tracelog(.receive(pinged: pingedPeer), message: \"\\(result)\")\n        let directives = self.swim.onEveryPingRequestResponse(result, pinged: pingedPeer)\n        if !directives.isEmpty {\n            fatalError(\n                \"\"\"\n                Ignored directive from: onEveryPingRequestResponse! \\\n                This directive used to be implemented as always returning no directives. 
\\\n                Check your shell implementations if you updated the SWIM library as it seems this has changed. \\\n                Directive was: \\(directives), swim was: \\(self.swim.metadata)\n                \"\"\"\n            )\n        }\n    }\n\n    func receivePingRequestResponse(result: SWIM.PingResponse<SWIM.NIOPeer, SWIM.NIOPeer>, pingedPeer: SWIM.NIOPeer) {\n        guard self.eventLoop.inEventLoop else {\n            return self.eventLoop.execute {\n                self.receivePingRequestResponse(result: result, pingedPeer: pingedPeer)\n            }\n        }\n\n        self.tracelog(.receive(pinged: pingedPeer), message: \"\\(result)\")\n        // TODO: do we know here WHO replied to us actually? We know who they told us about (with the ping-req), could be useful to know\n\n        // FIXME: change those directives\n        let directives: [SWIM.Instance.PingRequestResponseDirective] = self.swim.onPingRequestResponse(\n            result,\n            pinged: pingedPeer\n        )\n        directives.forEach {\n            switch $0 {\n            case .gossipProcessed(let gossipDirective):\n                self.handleGossipPayloadProcessedDirective(gossipDirective)\n\n            case .alive(let previousStatus):\n                self.log.debug(\"Member [\\(pingedPeer.swimNode)] marked as alive\")\n\n                if previousStatus.isUnreachable, let member = swim.member(for: pingedPeer) {\n                    let event = SWIM.MemberStatusChangedEvent(previousStatus: previousStatus, member: member)  // FIXME: make SWIM emit an option of the event\n                    self.announceMembershipChange(event)\n                }\n\n            case .newlySuspect(let previousStatus, let suspect):\n                self.log.debug(\"Member [\\(suspect)] marked as suspect\")\n                let event = SWIM.MemberStatusChangedEvent(previousStatus: previousStatus, member: suspect)  // FIXME: make SWIM emit an option of the event\n                
self.announceMembershipChange(event)\n\n            case .nackReceived:\n                self.log.debug(\"Received `nack` from indirect probing of [\\(pingedPeer)]\")\n            case let other:\n                self.log.trace(\"Handled ping request response, resulting directive: \\(other), was ignored.\")  // TODO: explicitly list all cases\n            }\n        }\n    }\n\n    private func announceMembershipChange(_ change: SWIM.MemberStatusChangedEvent<SWIM.NIOPeer>) {\n        self.onMemberStatusChange(change)\n    }\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: Sending ping, ping-req and friends\n\n    /// Send a `ping` message to the `target` peer.\n    ///\n    /// - parameters:\n    ///   - pingRequestOrigin: is set only when the ping that this is a reply to was originated as a `pingRequest`.\n    ///   - payload: the gossip payload to be sent with the `ping` message\n    ///   - sequenceNumber: sequence number to use for the `ping` message\n    func sendPing(\n        to target: SWIM.NIOPeer,\n        payload: SWIM.GossipPayload<SWIM.NIOPeer>,\n        pingRequestOrigin: SWIM.NIOPeer?,\n        pingRequestSequenceNumber: SWIM.SequenceNumber?,\n        timeout: Duration,\n        sequenceNumber: SWIM.SequenceNumber\n    ) async {\n        self.log.trace(\n            \"Sending ping\",\n            metadata: self.swim.metadata([\n                \"swim/target\": \"\\(target)\",\n                \"swim/gossip/payload\": \"\\(payload)\",\n                \"swim/timeout\": \"\\(timeout)\",\n            ])\n        )\n\n        self.tracelog(\n            .send(to: target),\n            message: \"ping(replyTo: \\(self.peer), payload: \\(payload), sequenceNr: \\(sequenceNumber))\"\n        )\n\n        do {\n            let response = try await target.ping(\n                payload: payload,\n                from: self.peer,\n                timeout: timeout,\n         
       sequenceNumber: sequenceNumber\n            )\n            self.receivePingResponse(\n                response: response,\n                pingRequestOriginPeer: pingRequestOrigin,\n                pingRequestSequenceNumber: pingRequestSequenceNumber\n            )\n        } catch let error as SWIMNIOTimeoutError {\n            self.receivePingResponse(\n                response: .timeout(\n                    target: target,\n                    pingRequestOrigin: pingRequestOrigin,\n                    timeout: error.timeout,\n                    sequenceNumber: sequenceNumber\n                ),\n                pingRequestOriginPeer: pingRequestOrigin,\n                pingRequestSequenceNumber: pingRequestSequenceNumber\n            )\n        } catch {\n            self.log.debug(\"Failed to ping\", metadata: [\"ping/target\": \"\\(target)\", \"error\": \"\\(error)\"])\n            self.receivePingResponse(\n                response: .timeout(\n                    target: target,\n                    pingRequestOrigin: pingRequestOrigin,\n                    timeout: timeout,\n                    sequenceNumber: sequenceNumber\n                ),\n                pingRequestOriginPeer: pingRequestOrigin,\n                pingRequestSequenceNumber: pingRequestSequenceNumber\n            )\n        }\n    }\n\n    func sendPingRequests(\n        _ directive: SWIM.Instance<SWIM.NIOPeer, SWIM.NIOPeer, SWIM.NIOPeer>.SendPingRequestDirective\n    ) async {\n        // We are only interested in successful pings, as a single success tells us the node is\n        // still alive. 
Therefore we propagate only the first success, but no failures.\n        // The failure case is handled through the timeout of the whole operation.\n        let firstSuccessPromise = self.eventLoop.makePromise(of: SWIM.PingResponse<SWIM.NIOPeer, SWIM.NIOPeer>.self)\n        let pingTimeout = directive.timeout\n        let target = directive.target\n        let startedSendingPingRequestsSentAt: DispatchTime = .now()\n\n        await withTaskGroup(of: Void.self) { group in\n            for pingRequest in directive.requestDetails {\n                group.addTask {\n                    let peerToPingRequestThrough = pingRequest.peerToPingRequestThrough\n                    let payload = pingRequest.payload\n                    let sequenceNumber = pingRequest.sequenceNumber\n\n                    self.log.trace(\n                        \"Sending ping request for [\\(target)] to [\\(peerToPingRequestThrough.swimNode)] with payload: \\(payload)\"\n                    )\n                    self.tracelog(\n                        .send(to: peerToPingRequestThrough),\n                        message:\n                            \"pingRequest(target: \\(target), replyTo: \\(self.peer), payload: \\(payload), sequenceNumber: \\(sequenceNumber))\"\n                    )\n\n                    let pingRequestSentAt: DispatchTime = .now()\n                    do {\n                        let response = try await peerToPingRequestThrough.pingRequest(\n                            target: target,\n                            payload: payload,\n                            from: self.peer,\n                            timeout: pingTimeout,\n                            sequenceNumber: sequenceNumber\n                        )\n\n                        // we only record successes\n                        self.swim.metrics.shell.pingRequestResponseTimeAll.recordInterval(since: pingRequestSentAt)\n                        self.receiveEveryPingRequestResponse(result: response, 
pingedPeer: target)\n\n                        if case .ack = response {\n                            // We only cascade successful ping responses (i.e. `ack`s);\n                            //\n                            // While this has a slight timing implication on time timeout of the pings -- the node that is last\n                            // in the list that we ping, has slightly less time to fulfil the \"total ping timeout\"; as we set a total timeout on the entire `firstSuccess`.\n                            // In practice those timeouts will be relatively large (seconds) and the few millis here should not have a large impact on correctness.\n                            firstSuccessPromise.succeed(response)\n                        }\n                    } catch {\n                        self.receiveEveryPingRequestResponse(\n                            result: .timeout(\n                                target: target,\n                                pingRequestOrigin: self.myself,\n                                timeout: pingTimeout,\n                                sequenceNumber: sequenceNumber\n                            ),\n                            pingedPeer: target\n                        )\n                        // these are generally harmless thus we do not want to log them on higher levels\n                        self.log.trace(\n                            \"Failed pingRequest\",\n                            metadata: [\n                                \"swim/target\": \"\\(target)\",\n                                \"swim/payload\": \"\\(payload)\",\n                                \"swim/pingTimeout\": \"\\(pingTimeout)\",\n                                \"error\": \"\\(error)\",\n                            ]\n                        )\n                    }\n                }\n            }\n        }\n\n        // guaranteed to be on \"our\" EL\n        firstSuccessPromise.futureResult.whenComplete { result in\n            
switch result {\n            case .success(let response):\n                self.swim.metrics.shell.pingRequestResponseTimeFirst.recordInterval(\n                    since: startedSendingPingRequestsSentAt\n                )\n                self.receivePingRequestResponse(result: response, pingedPeer: target)\n\n            case .failure(let error):\n                self.log.debug(\n                    \"Failed to pingRequest via \\(directive.requestDetails.count) peers\",\n                    metadata: [\"pingRequest/target\": \"\\(target)\", \"error\": \"\\(error)\"]\n                )\n                self.receivePingRequestResponse(\n                    result: .timeout(target: target, pingRequestOrigin: nil, timeout: pingTimeout, sequenceNumber: 0),\n                    pingedPeer: target\n                )  // sequence number does not matter\n            }\n        }\n    }\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: Handling local messages\n\n    /// Periodic (scheduled) function to ping (\"probe\") a random member.\n    ///\n    /// This is the heart of the periodic gossip performed by SWIM.\n    func handlePeriodicProtocolPeriodTick() {\n        self.eventLoop.assertInEventLoop()\n\n        let directives = self.swim.onPeriodicPingTick()\n        for directive in directives {\n            switch directive {\n            case .membershipChanged(let change):\n                self.tryAnnounceMemberReachability(change: change)\n\n            case .sendPing(let target, let payload, let timeout, let sequenceNumber):\n                self.log.trace(\n                    \"Periodic ping random member, among: \\(self.swim.otherMemberCount)\",\n                    metadata: self.swim.metadata\n                )\n                Task {\n                    await self.sendPing(\n                        to: target,\n                        payload: payload,\n                   
     pingRequestOrigin: nil,\n                        pingRequestSequenceNumber: nil,\n                        timeout: timeout,\n                        sequenceNumber: sequenceNumber\n                    )\n                }\n\n            case .scheduleNextTick(let delay):\n                self.nextPeriodicTickCancellable = self.schedule(delay: delay) {\n                    self.handlePeriodicProtocolPeriodTick()\n                }\n            }\n        }\n    }\n\n    /// Extra functionality, allowing external callers to ask this swim shell to start monitoring a specific node.\n    // TODO: Add some attempts:Int + maxAttempts: Int and handle them appropriately; https://github.com/apple/swift-cluster-membership/issues/32\n    private func receiveStartMonitoring(node: Node) {\n        guard self.eventLoop.inEventLoop else {\n            return self.eventLoop.execute {\n                self.receiveStartMonitoring(node: node)\n            }\n        }\n\n        guard self.node.withoutUID != node.withoutUID else {\n            return  // no need to monitor ourselves, nor a replacement of us (if node is our replacement, we should have been dead already)\n        }\n\n        let targetPeer = node.peer(on: self.channel)\n\n        guard !self.swim.isMember(targetPeer, ignoreUID: true) else {\n            return  // we're done, the peer has become a member!\n        }\n\n        let sequenceNumber = self.swim.nextSequenceNumber()\n        self.tracelog(\n            .send(to: targetPeer),\n            message: \"ping(replyTo: \\(self.peer), payload: .none, sequenceNr: \\(sequenceNumber))\"\n        )\n        Task {\n            do {\n                let response = try await targetPeer.ping(\n                    payload: self.swim.makeGossipPayload(to: nil),\n                    from: self.peer,\n                    timeout: .seconds(1),\n                    sequenceNumber: sequenceNumber\n                )\n                self.receivePingResponse(response: 
response, pingRequestOriginPeer: nil, pingRequestSequenceNumber: nil)\n            } catch {\n                self.log.debug(\n                    \"Failed to initial ping, will try again\",\n                    metadata: [\"ping/target\": \"\\(node)\", \"error\": \"\\(error)\"]\n                )\n                // TODO: implement via re-trying a few times and then giving up https://github.com/apple/swift-cluster-membership/issues/32\n                self.eventLoop.scheduleTask(in: .seconds(5)) {\n                    self.log.info(\"(Re)-Attempt ping to initial contact point: \\(node)\")\n                    self.receiveStartMonitoring(node: node)\n                }\n            }\n        }\n    }\n\n    // TODO: not presently used in the SWIMNIO + udp implementation, make use of it or remove? other impls do need this functionality.\n    private func receiveConfirmDead(deadNode node: Node) {\n        guard case .enabled = self.settings.swim.unreachability else {\n            self.log.warning(\n                \"Received confirm .dead for [\\(node)], however shell is not configured to use unreachable state, thus this results in no action.\"\n            )\n            return\n        }\n\n        // We are diverging from the SWIM paper here in that we store the `.dead` state, instead\n        // of removing the node from the member list. 
We do that in order to prevent dead nodes\n        // from being re-added to the cluster.\n        // TODO: add time of death to the status?\n\n        guard let member = swim.member(forNode: node) else {\n            self.log.warning(\n                \"Attempted to confirm .dead [\\(node)], yet no such member known\",\n                metadata: self.swim.metadata\n            )\n            return\n        }\n\n        // even if it's already dead, swim knows how to handle all the cases:\n        let directive = self.swim.confirmDead(peer: member.peer)\n        switch directive {\n        case .ignored:\n            self.log.warning(\n                \"Attempted to confirmDead node \\(node) was ignored, was already dead?\",\n                metadata: [\n                    \"swim/member\": \"\\(optional: swim.member(forNode: node))\"\n                ]\n            )\n\n        case .applied(let change):\n            self.log.trace(\n                \"Confirmed node as .dead\",\n                metadata: self.swim.metadata([\n                    \"swim/member\": \"\\(optional: swim.member(forNode: node))\"\n                ])\n            )\n            self.tryAnnounceMemberReachability(change: change)\n        }\n    }\n\n    func handleGossipPayloadProcessedDirective(\n        _ directive: SWIM.Instance<SWIM.NIOPeer, SWIM.NIOPeer, SWIM.NIOPeer>.GossipProcessedDirective\n    ) {\n        switch directive {\n        case .applied(let change):\n            self.tryAnnounceMemberReachability(change: change)\n        }\n    }\n\n    /// Announce to the a change in reachability of a member.\n    private func tryAnnounceMemberReachability(change: SWIM.MemberStatusChangedEvent<SWIM.NIOPeer>?) 
{\n        guard let change = change else {\n            // this means it likely was a change to the same status or it was about us, so we do not need to announce anything\n            return\n        }\n\n        guard change.isReachabilityChange else {\n            // the change is from a reachable to another reachable (or an unreachable to another unreachable-like (e.g. dead) state),\n            // and thus we must not act on it, as the shell was already notified before about the change into the current status.\n            return\n        }\n\n        // emit the SWIM.MemberStatusChange as user event\n        self.announceMembershipChange(change)\n    }\n}\n\n/// Reachability indicates a failure detectors assessment of the member node's reachability,\n/// i.e. whether or not the node is responding to health check messages.\n///\n/// Unlike `MemberStatus` (which may only move \"forward\"), reachability may flip back and forth between `.reachable`\n/// and `.unreachable` states multiple times during the lifetime of a member.\n///\n/// - SeeAlso: `SWIM` for a distributed failure detector implementation which may issue unreachable events.\npublic enum MemberReachability: String, Equatable {\n    /// The member is reachable and responding to failure detector probing properly.\n    case reachable\n    /// Failure detector has determined this node as not reachable.\n    /// It may be a candidate to be downed.\n    case unreachable\n}\n\nstruct SWIMCancellable {\n    let cancel: () -> Void\n\n    init(_ cancel: @escaping () -> Void) {\n        self.cancel = cancel\n    }\n}\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: Peer \"resolve\"\n\nextension SWIMAddressablePeer {\n    /// Since we're an implementation over UDP, all messages are sent to the same channel anyway,\n    /// and simply wrapped in `NIO.AddressedEnvelope`, thus we can easily take any addressable and\n    /// convert 
it into a real NIO peer by simply providing the channel we're running on.\n    func peer(_ channel: Channel) -> SWIM.NIOPeer {\n        self.swimNode.peer(on: channel)\n    }\n}\n\nextension ClusterMembership.Node {\n    /// Since we're an implementation over UDP, all messages are sent to the same channel anyway,\n    /// and simply wrapped in `NIO.AddressedEnvelope`, thus we can easily take any addressable and\n    /// convert it into a real NIO peer by simply providing the channel we're running on.\n    func peer(on channel: Channel) -> SWIM.NIOPeer {\n        .init(node: self, channel: channel)\n    }\n}\n"
  },
  {
    "path": "Sources/SWIMNIOExample/Settings.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport ClusterMembership\nimport Logging\nimport NIO\nimport SWIM\n\n/// Namespace for SWIMNIO constants.\npublic enum SWIMNIO {}\n\nextension SWIMNIO {\n    /// SWIMNIO specific settings.\n    public struct Settings {\n        /// Underlying settings for the SWIM protocol implementation.\n        public var swim: SWIM.Settings\n\n        public init() {\n            self.init(swim: .init())\n        }\n\n        public init(swim: SWIM.Settings) {\n            self.swim = swim\n            self.logger = swim.logger\n        }\n\n        /// The node as which this SWIMNIO shell should be bound.\n        ///\n        /// - SeeAlso: `SWIM.Settings.node`\n        public var node: Node? 
{\n            get {\n                self.swim.node\n            }\n            set {\n                self.swim.node = newValue\n            }\n        }\n\n        // ==== Settings specific to SWIMNIO ---------------------------------------------------------------------------\n\n        /// Allows for customizing the used logger.\n        /// By default the same as passed in `swim.logger` in the initializer is used.\n        public var logger: Logger\n\n        // TODO: retry initial contact points max count: https://github.com/apple/swift-cluster-membership/issues/32\n\n        /// How frequently the shell should retry attempting to join a `swim.initialContactPoint`\n        public var initialContactPointPingInterval: TimeAmount = .seconds(5)\n\n        /// For testing only, as it effectively disables the swim protocol period ticks.\n        ///\n        /// Allows for disabling of the periodically scheduled protocol period ticks.\n        internal var _startPeriodicPingTimer: Bool = true\n    }\n}\n"
  },
  {
    "path": "Sources/SWIMNIOExample/Utils/String+Extensions.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: String Interpolation: reflecting:\n\nextension String.StringInterpolation {\n    mutating func appendInterpolation(reflecting subject: Any?) {\n        self.appendLiteral(String(reflecting: subject))\n    }\n\n    mutating func appendInterpolation(reflecting subject: Any) {\n        self.appendLiteral(String(reflecting: subject))\n    }\n}\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: String Interpolation: lineByLine:\n\nextension String.StringInterpolation {\n    mutating func appendInterpolation(lineByLine subject: [Any]) {\n        self.appendLiteral(\"\\n    \\(subject.map { \"\\($0)\" }.joined(separator: \"\\n    \"))\")\n    }\n}\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: String Interpolation: _:orElse:\n\nextension String.StringInterpolation {\n    mutating func appendInterpolation<T>(_ value: T?, orElse defaultValue: String) {\n        self.appendLiteral(\"\\(value.map { \"\\($0)\" } ?? defaultValue)\")\n    }\n\n    mutating func appendInterpolation<T>(optional value: T?) {\n        self.appendLiteral(\"\\(value.map { \"\\($0)\" } ?? \"nil\")\")\n    }\n}\n"
  },
  {
    "path": "Sources/SWIMNIOExample/Utils/time.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport NIO\nimport SWIM\n\nextension Swift.Duration {\n    typealias Value = Int64\n\n    var nanoseconds: Value {\n        let (seconds, attoseconds) = self.components\n        let sNanos = seconds * Value(1_000_000_000)\n        let asNanos = attoseconds / Value(1_000_000_000)\n        let (totalNanos, overflow) = sNanos.addingReportingOverflow(asNanos)\n        return overflow ? .max : totalNanos\n    }\n\n    /// The microseconds representation of the `TimeAmount`.\n    var microseconds: Value {\n        self.nanoseconds / TimeUnit.microseconds.rawValue\n    }\n\n    /// The milliseconds representation of the `TimeAmount`.\n    var milliseconds: Value {\n        self.nanoseconds / TimeUnit.milliseconds.rawValue\n    }\n\n    /// The seconds representation of the `TimeAmount`.\n    var seconds: Value {\n        self.nanoseconds / TimeUnit.seconds.rawValue\n    }\n\n    var isEffectivelyInfinite: Bool {\n        self.nanoseconds == .max\n    }\n\n    var toNIO: NIO.TimeAmount {\n        .nanoseconds(self.nanoseconds)\n    }\n\n    /// Represents number of nanoseconds within given time unit\n    enum TimeUnit: Value {\n        case days = 86_400_000_000_000\n        case hours = 3_600_000_000_000\n        case minutes = 60_000_000_000\n        case seconds = 1_000_000_000\n        case milliseconds = 1_000_000\n        case microseconds = 1000\n        case nanoseconds = 1\n\n        var abbreviated: 
String {\n            switch self {\n            case .nanoseconds: return \"ns\"\n            case .microseconds: return \"μs\"\n            case .milliseconds: return \"ms\"\n            case .seconds: return \"s\"\n            case .minutes: return \"m\"\n            case .hours: return \"h\"\n            case .days: return \"d\"\n            }\n        }\n\n        func duration(_ duration: Int) -> Duration {\n            switch self {\n            case .nanoseconds: return .nanoseconds(Value(duration))\n            case .microseconds: return .microseconds(Value(duration))\n            case .milliseconds: return .milliseconds(Value(duration))\n            case .seconds: return .seconds(Value(duration))\n            case .minutes: return .seconds(Value(duration) * 60)\n            case .hours: return .seconds(Value(duration) * 60 * 60)\n            case .days: return .seconds(Value(duration) * 24 * 60 * 60)\n            }\n        }\n    }\n}\n\nprotocol PrettyTimeAmountDescription {\n    var nanoseconds: Int64 { get }\n    var isEffectivelyInfinite: Bool { get }\n\n    var prettyDescription: String { get }\n    func prettyDescription(precision: Int) -> String\n}\n\nextension PrettyTimeAmountDescription {\n    var prettyDescription: String {\n        self.prettyDescription()\n    }\n\n    func prettyDescription(precision: Int = 2) -> String {\n        assert(precision > 0, \"precision MUST BE > 0\")\n        if self.isEffectivelyInfinite {\n            return \"∞ (infinite)\"\n        }\n\n        var res = \"\"\n        var remainingNanos = self.nanoseconds\n\n        if remainingNanos < 0 {\n            res += \"-\"\n            remainingNanos = remainingNanos * -1\n        }\n\n        var i = 0\n        while i < precision {\n            let unit = self.chooseUnit(remainingNanos)\n\n            let rounded = Int(remainingNanos / unit.rawValue)\n            if rounded > 0 {\n                res += i > 0 ? 
\" \" : \"\"\n                res += \"\\(rounded)\\(unit.abbreviated)\"\n\n                remainingNanos = remainingNanos - unit.timeAmount(rounded).nanoseconds\n                i += 1\n            } else {\n                break\n            }\n        }\n\n        return res\n    }\n\n    private func chooseUnit(_ ns: Int64) -> PrettyTimeUnit {\n        if ns / PrettyTimeUnit.days.rawValue > 0 {\n            return PrettyTimeUnit.days\n        } else if ns / PrettyTimeUnit.hours.rawValue > 0 {\n            return PrettyTimeUnit.hours\n        } else if ns / PrettyTimeUnit.minutes.rawValue > 0 {\n            return PrettyTimeUnit.minutes\n        } else if ns / PrettyTimeUnit.seconds.rawValue > 0 {\n            return PrettyTimeUnit.seconds\n        } else if ns / PrettyTimeUnit.milliseconds.rawValue > 0 {\n            return PrettyTimeUnit.milliseconds\n        } else if ns / PrettyTimeUnit.microseconds.rawValue > 0 {\n            return PrettyTimeUnit.microseconds\n        } else {\n            return PrettyTimeUnit.nanoseconds\n        }\n    }\n}\n\n/// Represents number of nanoseconds within given time unit\nenum PrettyTimeUnit: Int64 {\n    case days = 86_400_000_000_000\n    case hours = 3_600_000_000_000\n    case minutes = 60_000_000_000\n    case seconds = 1_000_000_000\n    case milliseconds = 1_000_000\n    case microseconds = 1000\n    case nanoseconds = 1\n\n    var abbreviated: String {\n        switch self {\n        case .nanoseconds: return \"ns\"\n        case .microseconds: return \"μs\"\n        case .milliseconds: return \"ms\"\n        case .seconds: return \"s\"\n        case .minutes: return \"m\"\n        case .hours: return \"h\"\n        case .days: return \"d\"\n        }\n    }\n\n    func timeAmount(_ amount: Int) -> TimeAmount {\n        switch self {\n        case .nanoseconds: return .nanoseconds(Int64(amount))\n        case .microseconds: return .microseconds(Int64(amount))\n        case .milliseconds: return 
.milliseconds(Int64(amount))\n        case .seconds: return .seconds(Int64(amount))\n        case .minutes: return .minutes(Int64(amount))\n        case .hours: return .hours(Int64(amount))\n        case .days: return .hours(Int64(amount) * 24)\n        }\n    }\n}\n\nextension NIO.TimeAmount: PrettyTimeAmountDescription {\n    var isEffectivelyInfinite: Bool {\n        self.nanoseconds == .max\n    }\n}\n\nextension Swift.Duration: PrettyTimeAmountDescription {}\n"
  },
  {
    "path": "Tests/ClusterMembershipDocumentationTests/SWIMDocExamples.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\n// tag::imports[]\n\nimport SWIM\nimport XCTest\n\n// end::imports[]\n\nfinal class SWIMDocExamples: XCTestCase {}\n"
  },
  {
    "path": "Tests/ClusterMembershipTests/NodeTests.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2018-2019 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport XCTest\n\n@testable import ClusterMembership\n\nfinal class NodeTests: XCTestCase {\n    let firstNode = ClusterMembership.Node(protocol: \"test\", host: \"127.0.0.1\", port: 7001, uid: 1111)\n    let secondNode = ClusterMembership.Node(protocol: \"test\", host: \"127.0.0.1\", port: 7002, uid: 2222)\n    let thirdNode = ClusterMembership.Node(protocol: \"test\", host: \"127.0.0.2\", port: 7001, uid: 3333)\n\n    func testCompareSameProtocolAndHost() throws {\n        XCTAssertLessThan(self.firstNode, self.secondNode)\n        XCTAssertGreaterThan(self.secondNode, self.firstNode)\n        XCTAssertNotEqual(self.firstNode, self.secondNode)\n    }\n\n    func testCompareDifferentHost() throws {\n        XCTAssertLessThan(self.firstNode, self.thirdNode)\n        XCTAssertGreaterThan(self.thirdNode, self.firstNode)\n        XCTAssertNotEqual(self.firstNode, self.thirdNode)\n        XCTAssertLessThan(self.secondNode, self.thirdNode)\n        XCTAssertGreaterThan(self.thirdNode, self.secondNode)\n    }\n\n    func testSort() throws {\n        let nodes: Set<ClusterMembership.Node> = [secondNode, firstNode, thirdNode]\n        let sorted_nodes = nodes.sorted()\n\n        XCTAssertEqual(sorted_nodes, [self.firstNode, self.secondNode, self.thirdNode])\n    }\n}\n"
  },
  {
    "path": "Tests/SWIMNIOExampleTests/CodingTests.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2018-2019 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport ClusterMembership\nimport Foundation\nimport NIO\nimport SWIM\nimport XCTest\n\n@testable import SWIMNIOExample\n\nfinal class CodingTests: XCTestCase {\n    lazy var nioPeer: SWIM.NIOPeer = SWIM.NIOPeer(\n        node: .init(protocol: \"udp\", host: \"127.0.0.1\", port: 1111, uid: 12121),\n        channel: EmbeddedChannel()\n    )\n    lazy var nioPeerOther: SWIM.NIOPeer = SWIM.NIOPeer(\n        node: .init(protocol: \"udp\", host: \"127.0.0.1\", port: 2222, uid: 234_324),\n        channel: EmbeddedChannel()\n    )\n\n    lazy var memberOne = SWIM.Member(peer: nioPeer, status: .alive(incarnation: 1), protocolPeriod: 0)\n    lazy var memberTwo = SWIM.Member(peer: nioPeer, status: .alive(incarnation: 2), protocolPeriod: 0)\n    lazy var memberThree = SWIM.Member(peer: nioPeer, status: .alive(incarnation: 2), protocolPeriod: 0)\n\n    // TODO: add some more \"nasty\" cases, since the node parsing code is very manual and not hardened / secure\n    func test_serializationOf_node() throws {\n        try self.shared_serializationRoundtrip(\n            ContainsNode(node: Node(protocol: \"udp\", host: \"127.0.0.1\", port: 1111, uid: 12121))\n        )\n        try self.shared_serializationRoundtrip(\n            ContainsNode(node: Node(protocol: \"udp\", host: \"127.0.0.1\", port: 1111, uid: nil))\n        )\n        try self.shared_serializationRoundtrip(\n            ContainsNode(node: 
Node(protocol: \"udp\", host: \"127.0.0.1\", port: 1111, uid: .random(in: 0...UInt64.max)))\n        )\n        try self.shared_serializationRoundtrip(\n            Node(protocol: \"udp\", host: \"127.0.0.1\", port: 1111, uid: .random(in: 0...UInt64.max))\n        )\n\n        // with name\n        try self.shared_serializationRoundtrip(\n            Node(protocol: \"udp\", name: \"kappa\", host: \"127.0.0.1\", port: 2222, uid: .random(in: 0...UInt64.max))\n        )\n    }\n\n    func test_serializationOf_peer() throws {\n        try self.shared_serializationRoundtrip(ContainsPeer(peer: self.nioPeer))\n    }\n\n    func test_serializationOf_member() throws {\n        try self.shared_serializationRoundtrip(ContainsMember(member: self.memberOne))\n    }\n\n    func test_serializationOf_ping() throws {\n        let payloadSome: SWIM.GossipPayload = .membership([\n            self.memberOne,\n            self.memberTwo,\n            self.memberThree,\n        ])\n        try self.shared_serializationRoundtrip(\n            SWIM.Message.ping(replyTo: self.nioPeer, payload: payloadSome, sequenceNumber: 1212)\n        )\n    }\n\n    func test_serializationOf_pingReq() throws {\n        let payloadNone: SWIM.GossipPayload<SWIM.NIOPeer> = .none\n        try self.shared_serializationRoundtrip(\n            SWIM.Message.pingRequest(\n                target: self.nioPeer,\n                replyTo: self.nioPeerOther,\n                payload: payloadNone,\n                sequenceNumber: 111\n            )\n        )\n\n        let payloadSome: SWIM.GossipPayload = .membership([\n            self.memberOne,\n            self.memberTwo,\n            self.memberThree,\n        ])\n        try self.shared_serializationRoundtrip(\n            SWIM.Message.pingRequest(\n                target: self.nioPeer,\n                replyTo: self.nioPeerOther,\n                payload: payloadSome,\n                sequenceNumber: 1212\n            )\n        )\n    }\n\n    // ==== 
------------------------------------------------------------------------------------------------------------\n    // MARK: Utils\n\n    func shared_serializationRoundtrip<T: Codable>(_ obj: T) throws {\n        let repr = try SWIMNIODefaultEncoder().encode(obj)\n        let decoder = SWIMNIODefaultDecoder()\n        decoder.userInfo[.channelUserInfoKey] = EmbeddedChannel()\n        let deserialized = try decoder.decode(T.self, from: repr)\n\n        XCTAssertEqual(\"\\(obj)\", \"\\(deserialized)\")\n    }\n}\n\n// This is a workaround until Swift 5.2.5 is available with the \"top level string value encoding\" support.\nstruct ContainsPeer: Codable {\n    let peer: SWIM.NIOPeer\n}\n\n// This is a workaround until Swift 5.2.5 is available with the \"top level string value encoding\" support.\nstruct ContainsMember: Codable {\n    let member: SWIM.Member<SWIM.NIOPeer>\n}\n\n// This is a workaround until Swift 5.2.5 is available with the \"top level string value encoding\" support.\nstruct ContainsNode: Codable {\n    let node: ClusterMembership.Node\n}\n"
  },
  {
    "path": "Tests/SWIMNIOExampleTests/SWIMNIOClusteredTests.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2018-2019 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport ClusterMembership\nimport Logging\nimport NIO\nimport SWIM\nimport SWIMTestKit\nimport XCTest\n\n@testable import SWIMNIOExample\n\nfinal class SWIMNIOClusteredTests: RealClusteredXCTestCase {\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: White box tests // TODO: implement more of the tests in terms of inspecting events\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: Black box tests, we let the nodes run and inspect their state via logs\n\n    func test_real_peers_2_connect() throws {\n        let (firstHandler, _) = self.makeClusterNode()\n\n        let (secondHandler, _) = self.makeClusterNode { settings in\n            settings.swim.initialContactPoints = [firstHandler.shell.node]\n        }\n\n        try self.capturedLogs(of: firstHandler.shell.node)\n            .awaitLog(grep: #\"\"swim/members/count\": 2\"#)\n        try self.capturedLogs(of: secondHandler.shell.node)\n            .awaitLog(grep: #\"\"swim/members/count\": 2\"#)\n    }\n\n    func test_real_peers_2_connect_first_terminates() throws {\n        let (firstHandler, firstChannel) = self.makeClusterNode { settings in\n            settings.swim.pingTimeout = .milliseconds(100)\n            settings.swim.probeInterval = .milliseconds(500)\n        
}\n\n        let (secondHandler, _) = self.makeClusterNode { settings in\n            settings.swim.initialContactPoints = [firstHandler.shell.node]\n\n            settings.swim.pingTimeout = .milliseconds(100)\n            settings.swim.probeInterval = .milliseconds(500)\n        }\n\n        try self.capturedLogs(of: firstHandler.shell.node)\n            .awaitLog(grep: #\"\"swim/members/count\": 2\"#)\n\n        // close first channel\n        firstHandler.log.warning(\"Stopping \\(firstHandler.shell.node)...\")\n        secondHandler.log.warning(\"Stopping \\(firstHandler.shell.node)...\")\n        try firstChannel.close().wait()\n\n        // we should get back down to a 1 node cluster\n        // TODO: add same tests but embedded\n        try self.capturedLogs(of: secondHandler.shell.node)\n            .awaitLog(grep: #\"\"swim/suspects/count\": 1\"#, within: .seconds(20))\n    }\n\n    func test_real_peers_2_connect_peerCountNeverExceeds2() throws {\n        let (firstHandler, _) = self.makeClusterNode { settings in\n            settings.swim.pingTimeout = .milliseconds(100)\n            settings.swim.probeInterval = .milliseconds(500)\n        }\n\n        let (secondHandler, _) = self.makeClusterNode { settings in\n            settings.swim.initialContactPoints = [firstHandler.shell.node]\n\n            settings.swim.pingTimeout = .milliseconds(100)\n            settings.swim.probeInterval = .milliseconds(500)\n        }\n\n        try self.capturedLogs(of: firstHandler.shell.node)\n            .awaitLog(grep: #\"\"swim/members/count\": 2\"#)\n\n        sleep(5)\n\n        do {\n            let found = try self.capturedLogs(of: secondHandler.shell.node)\n                .awaitLog(grep: #\"\"swim/members/count\": 3\"#, within: .seconds(5))\n            XCTFail(\"Found unexpected members count: 3! 
Log message: \\(found)\")\n            return\n        } catch {\n            ()  // good!\n        }\n    }\n\n    func test_real_peers_5_connect() throws {\n        let (first, _) = self.makeClusterNode { settings in\n            settings.swim.probeInterval = .milliseconds(200)\n        }\n        let (second, _) = self.makeClusterNode { settings in\n            settings.swim.probeInterval = .milliseconds(200)\n            settings.swim.initialContactPoints = [first.shell.node]\n        }\n        let (third, _) = self.makeClusterNode { settings in\n            settings.swim.probeInterval = .milliseconds(200)\n            settings.swim.initialContactPoints = [second.shell.node]\n        }\n        let (fourth, _) = self.makeClusterNode { settings in\n            settings.swim.probeInterval = .milliseconds(200)\n            settings.swim.initialContactPoints = [third.shell.node]\n        }\n        let (fifth, _) = self.makeClusterNode { settings in\n            settings.swim.probeInterval = .milliseconds(200)\n            settings.swim.initialContactPoints = [fourth.shell.node]\n        }\n\n        try [first, second, third, fourth, fifth].forEach { handler in\n            do {\n                try self.capturedLogs(of: handler.shell.node)\n                    .awaitLog(\n                        grep: #\"\"swim/members/count\": 5\"#,\n                        within: .seconds(5)\n                    )\n            } catch {\n                throw TestError(\"Failed to find expected logs on \\(handler.shell.node)\", error: error)\n            }\n        }\n    }\n\n    func test_real_peers_5_connect_butSlowly() throws {\n        let (first, _) = self.makeClusterNode { settings in\n            settings.swim.pingTimeout = .milliseconds(100)\n            settings.swim.probeInterval = .milliseconds(500)\n        }\n        let (second, _) = self.makeClusterNode { settings in\n            settings.swim.initialContactPoints = [first.shell.node]\n            
settings.swim.pingTimeout = .milliseconds(100)\n            settings.swim.probeInterval = .milliseconds(500)\n        }\n        // we sleep in order to ensure we exhaust the \"gossip at most ... times\" logic\n        sleep(4)\n        let (third, _) = self.makeClusterNode { settings in\n            settings.swim.initialContactPoints = [second.shell.node]\n            settings.swim.pingTimeout = .milliseconds(100)\n            settings.swim.probeInterval = .milliseconds(500)\n        }\n        let (fourth, _) = self.makeClusterNode { settings in\n            settings.swim.initialContactPoints = [third.shell.node]\n            settings.swim.pingTimeout = .milliseconds(100)\n            settings.swim.probeInterval = .milliseconds(500)\n        }\n        // after joining two more, we sleep again to make sure they all exhaust their gossip message counts\n        sleep(2)\n        let (fifth, _) = self.makeClusterNode { settings in\n            // we connect to the first, they should exchange all information\n            settings.swim.initialContactPoints = [\n                first.shell.node,\n                fourth.shell.node,\n            ]\n        }\n\n        try [first, second, third, fourth, fifth].forEach { handler in\n            do {\n                try self.capturedLogs(of: handler.shell.node)\n                    .awaitLog(\n                        grep: #\"\"swim/members/count\": 5\"#,\n                        within: .seconds(5)\n                    )\n            } catch {\n                throw TestError(\"Failed to find expected logs on \\(handler.shell.node)\", error: error)\n            }\n        }\n    }\n\n    func test_real_peers_5_then1Dies_becomesSuspect() throws {\n        let (first, firstChannel) = self.makeClusterNode { settings in\n            settings.swim.pingTimeout = .milliseconds(100)\n            settings.swim.probeInterval = .milliseconds(500)\n        }\n        let (second, _) = self.makeClusterNode { settings in\n            
settings.swim.initialContactPoints = [first.shell.node]\n            settings.swim.pingTimeout = .milliseconds(100)\n            settings.swim.probeInterval = .milliseconds(500)\n        }\n        let (third, _) = self.makeClusterNode { settings in\n            settings.swim.initialContactPoints = [second.shell.node]\n            settings.swim.pingTimeout = .milliseconds(100)\n            settings.swim.probeInterval = .milliseconds(500)\n        }\n        let (fourth, _) = self.makeClusterNode { settings in\n            settings.swim.initialContactPoints = [third.shell.node]\n            settings.swim.pingTimeout = .milliseconds(100)\n            settings.swim.probeInterval = .milliseconds(500)\n        }\n        let (fifth, _) = self.makeClusterNode { settings in\n            settings.swim.initialContactPoints = [fourth.shell.node]\n            settings.swim.pingTimeout = .milliseconds(100)\n            settings.swim.probeInterval = .milliseconds(500)\n        }\n\n        try [first, second, third, fourth, fifth].forEach { handler in\n            do {\n                try self.capturedLogs(of: handler.shell.node)\n                    .awaitLog(\n                        grep: #\"\"swim/members/count\": 5\"#,\n                        within: .seconds(20)\n                    )\n            } catch {\n                throw TestError(\"Failed to find expected logs on \\(handler.shell.node)\", error: error)\n            }\n        }\n\n        try firstChannel.close().wait()\n\n        try [second, third, fourth, fifth].forEach { handler in\n            do {\n                try self.capturedLogs(of: handler.shell.node)\n                    .awaitLog(\n                        grep: #\"\"swim/suspects/count\": 1\"#,\n                        within: .seconds(10)\n                    )\n            } catch {\n                throw TestError(\"Failed to find expected logs on \\(handler.shell.node)\", error: error)\n            }\n        }\n    }\n\n    // ==== 
----------------------------------------------------------------------------------------------------------------\n    // MARK: nack tests\n\n    func test_real_pingRequestsGetSent_nacksArriveBack() throws {\n        let (firstHandler, _) = self.makeClusterNode()\n        let (secondHandler, _) = self.makeClusterNode { settings in\n            settings.swim.initialContactPoints = [firstHandler.shell.node]\n        }\n        let (thirdHandler, thirdChannel) = self.makeClusterNode { settings in\n            settings.swim.initialContactPoints = [firstHandler.shell.node, secondHandler.shell.node]\n        }\n\n        try self.capturedLogs(of: firstHandler.shell.node)\n            .awaitLog(grep: #\"\"swim/members/count\": 3\"#)\n        try self.capturedLogs(of: secondHandler.shell.node)\n            .awaitLog(grep: #\"\"swim/members/count\": 3\"#)\n        try self.capturedLogs(of: thirdHandler.shell.node)\n            .awaitLog(grep: #\"\"swim/members/count\": 3\"#)\n\n        try thirdChannel.close().wait()\n\n        try self.capturedLogs(of: firstHandler.shell.node)\n            .awaitLog(grep: \"Read successful: response/nack\")\n        try self.capturedLogs(of: secondHandler.shell.node)\n            .awaitLog(grep: \"Read successful: response/nack\")\n\n        try self.capturedLogs(of: firstHandler.shell.node)\n            .awaitLog(grep: #\"\"swim/suspects/count\": 1\"#)\n        try self.capturedLogs(of: secondHandler.shell.node)\n            .awaitLog(grep: #\"\"swim/suspects/count\": 1\"#)\n    }\n}\n\nprivate struct TestError: Error {\n    let message: String\n    let error: Error\n\n    init(_ message: String, error: Error) {\n        self.message = message\n        self.error = error\n    }\n}\n"
  },
  {
    "path": "Tests/SWIMNIOExampleTests/SWIMNIOEventClusteredTests.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2018-2019 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport ClusterMembership\nimport NIO\nimport SWIM\nimport SWIMTestKit\nimport XCTest\n\n@testable import SWIMNIOExample\n\n// TODO: those tests could be done on embedded event loops probably\nfinal class SWIMNIOEventClusteredTests: EmbeddedClusteredXCTestCase {\n    var settings: SWIMNIO.Settings = SWIMNIO.Settings(swim: .init())\n    lazy var myselfNode = Node(protocol: \"udp\", host: \"127.0.0.1\", port: 7001, uid: 1111)\n    lazy var myselfPeer = SWIM.NIOPeer(node: myselfNode, channel: EmbeddedChannel())\n    lazy var myselfMemberAliveInitial = SWIM.Member(peer: myselfPeer, status: .alive(incarnation: 0), protocolPeriod: 0)\n\n    var group: MultiThreadedEventLoopGroup!\n\n    override func setUp() {\n        super.setUp()\n\n        self.settings.node = self.myselfNode\n\n        self.group = MultiThreadedEventLoopGroup(numberOfThreads: 1)\n    }\n\n    override func tearDown() {\n        try! self.group.syncShutdownGracefully()\n        self.group = nil\n        super.tearDown()\n    }\n\n    func test_memberStatusChange_alive_emittedForMyself() throws {\n        let firstProbe = ProbeEventHandler(loop: group.next())\n\n        let first = try bindShell(probe: firstProbe) { settings in\n            settings.node = self.myselfNode\n        }\n        defer { try! 
first.close().wait() }\n\n        try firstProbe.expectEvent(\n            SWIM.MemberStatusChangedEvent(previousStatus: nil, member: self.myselfMemberAliveInitial)\n        )\n    }\n\n    func test_memberStatusChange_suspect_emittedForDyingNode() throws {\n        let firstProbe = ProbeEventHandler(loop: group.next())\n        let secondProbe = ProbeEventHandler(loop: group.next())\n\n        let secondNodePort = 7002\n        let secondNode = Node(protocol: \"udp\", host: \"127.0.0.1\", port: secondNodePort, uid: 222_222)\n\n        let second = try bindShell(probe: secondProbe) { settings in\n            settings.node = secondNode\n        }\n\n        let first = try bindShell(probe: firstProbe) { settings in\n            settings.node = self.myselfNode\n            settings.swim.initialContactPoints = [secondNode.withoutUID]\n        }\n        defer { try! first.close().wait() }\n\n        // wait for second probe to become alive:\n        try secondProbe.expectEvent(\n            SWIM.MemberStatusChangedEvent(\n                previousStatus: nil,\n                member: SWIM.Member(\n                    peer: SWIM.NIOPeer(node: secondNode, channel: EmbeddedChannel()),\n                    status: .alive(incarnation: 0),\n                    protocolPeriod: 0\n                )\n            )\n        )\n\n        sleep(5)  // let them discover each other, since the nodes are slow at retrying and we didn't configure it yet a sleep is here meh\n        try! 
second.close().wait()\n\n        try firstProbe.expectEvent(\n            SWIM.MemberStatusChangedEvent(previousStatus: nil, member: self.myselfMemberAliveInitial)\n        )\n\n        let secondAliveEvent = try firstProbe.expectEvent()\n        XCTAssertTrue(secondAliveEvent.isReachabilityChange)\n        XCTAssertTrue(secondAliveEvent.status.isAlive)\n        XCTAssertEqual(secondAliveEvent.member.node.withoutUID, secondNode.withoutUID)\n\n        let secondDeadEvent = try firstProbe.expectEvent()\n        XCTAssertTrue(secondDeadEvent.isReachabilityChange)\n        XCTAssertTrue(secondDeadEvent.status.isDead)\n        XCTAssertEqual(secondDeadEvent.member.node.withoutUID, secondNode.withoutUID)\n    }\n\n    private func bindShell(\n        probe probeHandler: ProbeEventHandler,\n        configure: (inout SWIMNIO.Settings) -> Void = { _ in () }\n    ) throws -> Channel {\n        var settings = self.settings\n        configure(&settings)\n        self.makeLogCapture(name: \"swim-\\(settings.node!.port)\", settings: &settings)\n\n        self._nodes.append(settings.node!)\n        return try DatagramBootstrap(group: self.group)\n            .channelOption(ChannelOptions.socketOption(.so_reuseaddr), value: 1)\n            .channelInitializer { channel in\n\n                let swimHandler = SWIMNIOHandler(settings: settings)\n                return channel.pipeline.addHandler(swimHandler).flatMap { _ in\n                    channel.pipeline.addHandler(probeHandler)\n                }\n            }.bind(host: settings.node!.host, port: settings.node!.port).wait()\n    }\n}\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: Test Utils\n\nextension ProbeEventHandler {\n    @discardableResult\n    func expectEvent(\n        _ expected: SWIM.MemberStatusChangedEvent<SWIM.NIOPeer>? 
= nil,\n        file: StaticString = (#file),\n        line: UInt = #line\n    ) throws -> SWIM.MemberStatusChangedEvent<SWIM.NIOPeer> {\n        let got = try self.expectEvent()\n\n        if let expected = expected {\n            XCTAssertEqual(got, expected, file: file, line: line)\n        }\n\n        return got\n    }\n}\n\nfinal class ProbeEventHandler: ChannelInboundHandler {\n    typealias InboundIn = SWIM.MemberStatusChangedEvent<SWIM.NIOPeer>\n\n    var events: [SWIM.MemberStatusChangedEvent<SWIM.NIOPeer>] = []\n    var waitingPromise: EventLoopPromise<SWIM.MemberStatusChangedEvent<SWIM.NIOPeer>>?\n    var loop: EventLoop\n\n    init(loop: EventLoop) {\n        self.loop = loop\n    }\n\n    func channelRead(context: ChannelHandlerContext, data: NIOAny) {\n        let change = self.unwrapInboundIn(data)\n        self.events.append(change)\n\n        if let probePromise = self.waitingPromise {\n            let event = self.events.removeFirst()\n            probePromise.succeed(event)\n            self.waitingPromise = nil\n        }\n    }\n\n    func expectEvent(\n        file: StaticString = #file,\n        line: UInt = #line\n    ) throws -> SWIM.MemberStatusChangedEvent<SWIM.NIOPeer> {\n        let p = self.loop.makePromise(of: SWIM.MemberStatusChangedEvent<SWIM.NIOPeer>.self, file: file, line: line)\n        self.loop.execute {\n            assert(self.waitingPromise == nil, \"Already waiting on an event\")\n            if !self.events.isEmpty {\n                let event = self.events.removeFirst()\n                p.succeed(event)\n            } else {\n                self.waitingPromise = p\n            }\n        }\n        return try p.futureResult.wait()\n    }\n}\n"
  },
  {
    "path": "Tests/SWIMNIOExampleTests/SWIMNIOMetricsTests.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport ClusterMembership\nimport Dispatch\nimport Metrics\nimport NIO\nimport SWIMTestKit\nimport XCTest\n\n@testable import CoreMetrics\n@testable import SWIM\n@testable import SWIMNIOExample\n\nfinal class SWIMNIOMetricsTests: RealClusteredXCTestCase {\n    var testMetrics: TestMetrics!\n\n    override func setUp() {\n        super.setUp()\n\n        self.testMetrics = TestMetrics()\n        MetricsSystem.bootstrapInternal(self.testMetrics)\n    }\n\n    override func tearDown() {\n        super.tearDown()\n        MetricsSystem.bootstrapInternal(NOOPMetricsHandler.instance)\n    }\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: Metrics tests\n\n    func test_metrics_emittedByNIOImplementation() throws {\n        let (firstHandler, _) = self.makeClusterNode { settings in\n            settings.swim.metrics.labelPrefix = \"first\"\n            settings.swim.probeInterval = .milliseconds(100)\n        }\n        _ = self.makeClusterNode { settings in\n            settings.swim.metrics.labelPrefix = \"second\"\n            settings.swim.probeInterval = .milliseconds(100)\n            settings.swim.initialContactPoints = [firstHandler.shell.node]\n        }\n        let (_, thirdChannel) = self.makeClusterNode { settings in\n            settings.swim.metrics.labelPrefix = \"third\"\n            
settings.swim.probeInterval = .milliseconds(100)\n            settings.swim.initialContactPoints = [firstHandler.shell.node]\n        }\n\n        sleep(1)  // giving it some extra time to report a few metrics (a few round-trip times etc).\n\n        let m: SWIM.Metrics.ShellMetrics = firstHandler.metrics!\n\n        let roundTripTime = try! self.testMetrics.expectTimer(m.pingResponseTime)\n        XCTAssertNotNil(roundTripTime.lastValue)  // some roundtrip time should have been reported\n        for rtt in roundTripTime.values {\n            print(\"  ping rtt recorded: \\(TimeAmount.nanoseconds(rtt).prettyDescription)\")\n        }\n\n        let messageInboundCount = try! self.testMetrics.expectCounter(m.messageInboundCount)\n        let messageInboundBytes = try! self.testMetrics.expectRecorder(m.messageInboundBytes)\n        print(\"  messageInboundCount = \\(messageInboundCount.totalValue)\")\n        print(\"  messageInboundBytes = \\(messageInboundBytes.lastValue!)\")\n        XCTAssertGreaterThan(messageInboundCount.totalValue, 0)\n        XCTAssertGreaterThan(messageInboundBytes.lastValue!, 0)\n\n        let messageOutboundCount = try! self.testMetrics.expectCounter(m.messageOutboundCount)\n        let messageOutboundBytes = try! self.testMetrics.expectRecorder(m.messageOutboundBytes)\n        print(\"  messageOutboundCount = \\(messageOutboundCount.totalValue)\")\n        print(\"  messageOutboundBytes = \\(messageOutboundBytes.lastValue!)\")\n        XCTAssertGreaterThan(messageOutboundCount.totalValue, 0)\n        XCTAssertGreaterThan(messageOutboundBytes.lastValue!, 0)\n\n        thirdChannel.close(promise: nil)\n        sleep(2)\n\n        let pingRequestResponseTimeAll = try! self.testMetrics.expectTimer(m.pingRequestResponseTimeAll)\n        print(\"  pingRequestResponseTimeAll = \\(pingRequestResponseTimeAll.lastValue!)\")\n        XCTAssertGreaterThan(pingRequestResponseTimeAll.lastValue!, 0)\n\n        let pingRequestResponseTimeFirst = try! 
self.testMetrics.expectTimer(m.pingRequestResponseTimeFirst)\n        XCTAssertNil(pingRequestResponseTimeFirst.lastValue)  // because this only counts ACKs, and we get NACKs because the peer is down\n\n        let successfulPingProbes = try! self.testMetrics.expectCounter(\n            firstHandler.shell.swim.metrics.successfulPingProbes\n        )\n        print(\"  successfulPingProbes = \\(successfulPingProbes.totalValue)\")\n        XCTAssertGreaterThan(successfulPingProbes.totalValue, 1)  // definitely at least one, we joined some nodes\n\n        let failedPingProbes = try! self.testMetrics.expectCounter(firstHandler.shell.swim.metrics.failedPingProbes)\n        print(\"  failedPingProbes = \\(failedPingProbes.totalValue)\")\n        XCTAssertGreaterThan(failedPingProbes.totalValue, 1)  // definitely at least one, we detected the down peer\n\n        let successfulPingRequestProbes = try! self.testMetrics.expectCounter(\n            firstHandler.shell.swim.metrics.successfulPingRequestProbes\n        )\n        print(\"  successfulPingRequestProbes = \\(successfulPingRequestProbes.totalValue)\")\n        XCTAssertGreaterThan(successfulPingRequestProbes.totalValue, 1)  // definitely at least one, the second peer is alive and .nacks us, so we count that as success\n\n        let failedPingRequestProbes = try! self.testMetrics.expectCounter(\n            firstHandler.shell.swim.metrics.failedPingRequestProbes\n        )\n        print(\"  failedPingRequestProbes = \\(failedPingRequestProbes.totalValue)\")\n        XCTAssertEqual(failedPingRequestProbes.totalValue, 0)  // 0 because the second peer is still responsive to us, even it third is dead\n    }\n}\n"
  },
  {
    "path": "Tests/SWIMNIOExampleTests/Utils/BaseXCTestCases.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2018-2022 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport ClusterMembership\nimport Logging\nimport NIO\nimport NIOCore\nimport SWIM\nimport SWIMTestKit\nimport XCTest\n\nimport struct Foundation.Date\nimport class Foundation.NSLock\n\n@testable import SWIMNIOExample\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: Real Networking Test Case\n\nclass RealClusteredXCTestCase: BaseClusteredXCTestCase {\n    var group: MultiThreadedEventLoopGroup!\n    var loop: EventLoop!\n\n    override func setUp() {\n        super.setUp()\n\n        self.group = MultiThreadedEventLoopGroup(numberOfThreads: 8)\n        self.loop = group.next()\n    }\n\n    override func tearDown() {\n        super.tearDown()\n\n        try! self.group.syncShutdownGracefully()\n        self.group = nil\n        self.loop = nil\n    }\n\n    func makeClusterNode(\n        name: String? = nil,\n        configure configureSettings: (inout SWIMNIO.Settings) -> Void = { _ in () }\n    ) -> (SWIMNIOHandler, Channel) {\n        let port = self.nextPort()\n        let name = name ?? 
\"swim-\\(port)\"\n        var settings = SWIMNIO.Settings()\n        configureSettings(&settings)\n\n        if self.captureLogs {\n            self.makeLogCapture(name: name, settings: &settings)\n        }\n\n        let handler = SWIMNIOHandler(settings: settings)\n        let bootstrap = DatagramBootstrap(group: self.group)\n            .channelOption(ChannelOptions.socketOption(.so_reuseaddr), value: 1)\n            .channelInitializer { channel in channel.pipeline.addHandler(handler) }\n\n        let channel = try! bootstrap.bind(host: \"127.0.0.1\", port: port).wait()\n\n        self._shells.append(handler.shell)\n        self._nodes.append(handler.shell.node)\n\n        return (handler, channel)\n    }\n}\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: Embedded Networking Test Case\n\nclass EmbeddedClusteredXCTestCase: BaseClusteredXCTestCase {\n    var loop: EmbeddedEventLoop!\n\n    open override func setUp() {\n        super.setUp()\n\n        self.loop = EmbeddedEventLoop()\n    }\n\n    open override func tearDown() {\n        super.tearDown()\n\n        try! self.loop.close()\n        self.loop = nil\n    }\n\n    func makeEmbeddedShell(\n        _ _name: String? = nil,\n        configure: (inout SWIMNIO.Settings) -> Void = { _ in () }\n    ) -> SWIMNIOShell {\n        var settings = SWIMNIO.Settings()\n        configure(&settings)\n        let node: Node\n        if let _node = settings.swim.node {\n            node = _node\n        } else {\n            let port = self.nextPort()\n            let name = _name ?? \"swim-\\(port)\"\n            node = Node(protocol: \"test\", name: name, host: \"127.0.0.1\", port: port, uid: .random(in: 1..<UInt64.max))\n        }\n\n        if self.captureLogs {\n            self.makeLogCapture(name: node.name ?? 
\"swim-\\(node.port)\", settings: &settings)\n        }\n\n        let channel = EmbeddedChannel(loop: self.loop)\n        channel.isWritable = true\n        let shell = SWIMNIOShell(\n            node: node,\n            settings: settings,\n            channel: channel,\n            onMemberStatusChange: { _ in () }  // TODO: store events so we can inspect them?\n        )\n\n        self._nodes.append(shell.node)\n        self._shells.append(shell)\n\n        return shell\n    }\n}\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: Base\n\nclass BaseClusteredXCTestCase: XCTestCase {\n    public internal(set) var _nodes: [Node] = []\n    public internal(set) var _shells: [SWIMNIOShell] = []\n    public internal(set) var _logCaptures: [LogCapture] = []\n\n    /// If `true` automatically captures all logs of all `setUpNode` started systems, and prints them if at least one test failure is encountered.\n    /// If `false`, log capture is disabled and the systems will log messages normally.\n    ///\n    /// - Default: `true`\n    open var captureLogs: Bool {\n        true\n    }\n\n    /// Enables logging all captured logs, even if the test passed successfully.\n    /// - Default: `false`\n    open var alwaysPrintCaptureLogs: Bool {\n        false\n    }\n\n    var _nextPort = 9001\n    open func nextPort() -> Int {\n        defer { self._nextPort += 1 }\n        return self._nextPort\n    }\n\n    open func configureLogCapture(settings: inout LogCapture.Settings) {\n        // just use defaults\n    }\n\n    open override func setUp() {\n        super.setUp()\n\n        self.addTeardownBlock {\n            for shell in self._shells {\n                do {\n                    try await shell.myself.channel.close()\n                } catch {\n                    ()  // channel was already closed, that's okay (e.g. 
we closed it in the test to \"crash\" a node)\n                }\n            }\n        }\n    }\n\n    open override func tearDown() {\n        super.tearDown()\n\n        let testsFailed = self.testRun?.totalFailureCount ?? 0 > 0\n        if self.captureLogs, self.alwaysPrintCaptureLogs || testsFailed {\n            self.printAllCapturedLogs()\n        }\n\n        self._nodes = []\n        self._logCaptures = []\n    }\n\n    func makeLogCapture(name: String, settings: inout SWIMNIO.Settings) {\n        var captureSettings = LogCapture.Settings()\n        self.configureLogCapture(settings: &captureSettings)\n        let capture = LogCapture(settings: captureSettings)\n\n        settings.logger = capture.logger(label: name)\n\n        self._logCaptures.append(capture)\n    }\n}\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: Captured Logs\n\nextension BaseClusteredXCTestCase {\n    public func capturedLogs(of node: Node) -> LogCapture {\n        guard let index = self._nodes.firstIndex(of: node) else {\n            fatalError(\"No such node: [\\(node)] in [\\(self._nodes)]!\")\n        }\n\n        return self._logCaptures[index]\n    }\n\n    public func printCapturedLogs(of node: Node) {\n        print(\"------------------------------------- \\(node) ------------------------------------------------\")\n        self.capturedLogs(of: node).printLogs()\n        print(\n            \"========================================================================================================================\"\n        )\n    }\n\n    public func printAllCapturedLogs() {\n        for node in self._nodes {\n            self.printCapturedLogs(of: node)\n        }\n    }\n}\n"
  },
  {
    "path": "Tests/SWIMTestKit/LogCapture.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2018-2019 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport NIO\nimport XCTest\n\nimport struct Foundation.Date\nimport class Foundation.NSLock\n\n@testable import Logging\n\n/// Testing only utility: Captures all log statements for later inspection.\npublic final class LogCapture {\n    private var _logs: [CapturedLogMessage] = []\n    private let lock = NSLock()\n\n    let settings: Settings\n    private var captureLabel: String = \"\"\n\n    public init(settings: Settings = .init()) {\n        self.settings = settings\n    }\n\n    public func logger(label: String) -> Logger {\n        self.lock.lock()\n        defer {\n            self.lock.unlock()\n        }\n\n        self.captureLabel = label\n        return Logger(label: \"LogCapture(\\(label))\", LogCaptureLogHandler(label: label, self))\n    }\n\n    func append(_ log: CapturedLogMessage) {\n        self.lock.lock()\n        defer {\n            self.lock.unlock()\n        }\n\n        self._logs.append(log)\n    }\n\n    public var logs: [CapturedLogMessage] {\n        self.lock.lock()\n        defer {\n            self.lock.unlock()\n        }\n\n        return self._logs\n    }\n\n    @discardableResult\n    public func awaitLog(\n        grep: String,\n        within: TimeAmount = .seconds(10),\n        file: StaticString = #file,\n        line: UInt = #line,\n        column: UInt = #column\n    ) throws -> CapturedLogMessage {\n        let startTime = DispatchTime.now()\n        let 
deadline = startTime.uptimeNanoseconds + UInt64(within.nanoseconds)\n        func timeExceeded() -> Bool {\n            DispatchTime.now().uptimeNanoseconds > deadline\n        }\n        while !timeExceeded() {\n            let logs = self.logs\n            if let log = logs.first(where: { log in \"\\(log)\".contains(grep) }) {\n                return log  // ok, found it!\n            }\n\n            sleep(1)\n        }\n\n        throw LogCaptureError(\n            message: \"After \\(within), logs still did not contain: [\\(grep)]\",\n            file: file,\n            line: line,\n            column: column\n        )\n    }\n}\n\nextension LogCapture {\n    public struct Settings {\n        public init() {}\n\n        public var minimumLogLevel: Logger.Level = .trace\n\n        public var grep: Set<String> = []\n\n        /// Do not capture log messages which include the following strings.\n        public var excludeGrep: Set<String> = []\n\n        public var ignoredMetadata: Set<String> = []\n    }\n}\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: XCTest integrations and helpers\n\n/// ### Warning\n/// This handler uses locks for each and every operation.\nextension LogCapture {\n    public func printIfFailed(_ testRun: XCTestRun?) 
{\n        if let failureCount = testRun?.failureCount, failureCount > 0 {\n            print(\n                \"------------------------------------------------------------------------------------------------------------------------\"\n            )\n            self.printLogs()\n            print(\n                \"========================================================================================================================\"\n            )\n        }\n    }\n\n    public func printLogs() {\n        for log in self.logs {\n            var metadataString: String = \"\"\n            var node: String = \"\"\n            if var metadata = log.metadata {\n                if let n = metadata.removeValue(forKey: \"swim/node\") {\n                    node = \"[\\(n)]\"\n                }\n\n                metadata.removeValue(forKey: \"label\")\n                self.settings.ignoredMetadata.forEach { ignoreKey in\n                    metadata.removeValue(forKey: ignoreKey)\n                }\n                if !metadata.isEmpty {\n                    metadataString = \"\\n// metadata:\\n\"\n                    for key in metadata.keys.sorted() {\n                        let value: Logger.MetadataValue = metadata[key]!\n                        let valueDescription = self.prettyPrint(metadata: value)\n\n                        var allString = \"\\n// \\\"\\(key)\\\": \\(valueDescription)\"\n                        if allString.contains(\"\\n\") {\n                            allString = String(\n                                allString.split(separator: \"\\n\").map { valueLine in\n                                    if valueLine.starts(with: \"// \") {\n                                        return \"\\(valueLine)\\n\"\n                                    } else {\n                                        return \"// \\(valueLine)\\n\"\n                                    }\n                                }.joined(separator: \"\")\n                          
  )\n                        }\n                        metadataString.append(allString)\n                    }\n                    metadataString = String(metadataString.dropLast(1))\n                }\n            }\n            let date = Self._createFormatter().string(from: log.date)\n            let file = log.file.split(separator: \"/\").last ?? \"\"\n            let line = log.line\n            print(\n                \"[\\(self.captureLabel)][\\(date)] [\\(file):\\(line)]\\(node) [\\(log.level)] \\(log.message)\\(metadataString)\"\n            )\n        }\n    }\n\n    public static func _createFormatter() -> DateFormatter {\n        let formatter = DateFormatter()\n        formatter.dateFormat = \"y-MM-dd H:m:ss.SSSS\"\n        formatter.locale = Locale(identifier: \"en_US\")\n        formatter.calendar = Calendar(identifier: .gregorian)\n        return formatter\n    }\n\n    internal func prettyPrint(metadata: Logger.MetadataValue) -> String {\n        let CONSOLE_RESET = \"\\u{001B}[0;0m\"\n        let CONSOLE_BOLD = \"\\u{001B}[1m\"\n\n        var valueDescription = \"\"\n        switch metadata {\n        case .string(let string):\n            valueDescription = string\n        case .stringConvertible(let convertible):\n            valueDescription = convertible.description\n        case .array(let array):\n            valueDescription = \"\\n  \\(array.map { \"\\($0)\" }.joined(separator: \"\\n  \"))\"\n        case .dictionary(let metadata):\n            for k in metadata.keys {\n                valueDescription += \"\\(CONSOLE_BOLD)\\(k)\\(CONSOLE_RESET): \\(self.prettyPrint(metadata: metadata[k]!))\"\n            }\n        }\n\n        return valueDescription\n    }\n}\n\npublic struct CapturedLogMessage {\n    public let date: Date\n    public let level: Logger.Level\n    public var message: Logger.Message\n    public var metadata: Logger.Metadata?\n    public let file: String\n    public let function: String\n    public let line: 
UInt\n}\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: LogCapture LogHandler\n\nstruct LogCaptureLogHandler: LogHandler {\n    let label: String\n    let capture: LogCapture\n\n    init(label: String, _ capture: LogCapture) {\n        self.label = label\n        self.capture = capture\n    }\n\n    public func log(\n        level: Logger.Level,\n        message: Logger.Message,\n        metadata: Logger.Metadata?,\n        file: String,\n        function: String,\n        line: UInt\n    ) {\n        guard\n            self.capture.settings.grep.isEmpty\n                || self.capture.settings.grep.contains(where: { \"\\(message)\".contains($0) })\n        else {\n            return  // log was included explicitly\n        }\n        guard !self.capture.settings.excludeGrep.contains(where: { \"\\(message)\".contains($0) }) else {\n            return  // log was excluded explicitly\n        }\n\n        let date = Date()\n        var _metadata: Logger.Metadata = self.metadata\n        _metadata.merge(metadata ?? [:], uniquingKeysWith: { _, r in r })\n        _metadata[\"label\"] = \"\\(self.label)\"\n\n        self.capture.append(\n            CapturedLogMessage(\n                date: date,\n                level: level,\n                message: message,\n                metadata: _metadata,\n                file: file,\n                function: function,\n                line: line\n            )\n        )\n    }\n\n    public subscript(metadataKey metadataKey: String) -> Logger.Metadata.Value? 
{\n        get {\n            self.metadata[metadataKey]\n        }\n        set {\n            self.metadata[metadataKey] = newValue\n        }\n    }\n\n    public var metadata: Logging.Logger.Metadata = [:]\n\n    public var logLevel: Logger.Level {\n        get {\n            self.capture.settings.minimumLogLevel\n        }\n        set {\n            // ignore, we always collect all logs\n        }\n    }\n}\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: Should matchers\n\nextension LogCapture {\n    /// Asserts that a message matching the query requirements was captured *already* (without waiting for it to appear)\n    ///\n    /// - Parameter message: can be surrounded like `*what*` to query as a \"contains\" rather than an == on the captured logs.\n    @discardableResult\n    public func shouldContain(\n        prefix: String? = nil,\n        message: String? = nil,\n        grep: String? = nil,\n        at level: Logger.Level? = nil,\n        expectedFile: String? 
= nil,\n        expectedLine: Int = -1,\n        failTest: Bool = true,\n        file: StaticString = #file,\n        line: UInt = #line,\n        column: UInt = #column\n    ) throws -> CapturedLogMessage {\n        precondition(\n            prefix != nil || message != nil || grep != nil || level != nil || level != nil || expectedFile != nil,\n            \"At least one query parameter must be not `nil`!\"\n        )\n\n        let found = self.logs.lazy\n            .filter { log in\n                if let expected = message {\n                    if expected.first == \"*\", expected.last == \"*\" {\n                        return \"\\(log.message)\".contains(expected.dropFirst().dropLast())\n                    } else {\n                        return expected == \"\\(log.message)\"\n                    }\n                } else {\n                    return true\n                }\n            }.filter { log in\n                if let expected = prefix {\n                    return \"\\(log.message)\".starts(with: expected)\n                } else {\n                    return true\n                }\n            }.filter { log in\n                if let expected = grep {\n                    return \"\\(log)\".contains(expected)\n                } else {\n                    return true\n                }\n            }.filter { log in\n                if let expected = level {\n                    return log.level == expected\n                } else {\n                    return true\n                }\n            }.filter { log in\n                if let expected = expectedFile {\n                    return expected == \"\\(log.file)\"\n                } else {\n                    return true\n                }\n            }.filter { log in\n                if expectedLine > -1 {\n                    return log.line == expectedLine\n                } else {\n                    return true\n                }\n            }.first\n\n        if let found = 
found {\n            return found\n        } else {\n            let query = [\n                prefix.map {\n                    \"prefix: \\\"\\($0)\\\"\"\n                },\n                message.map {\n                    \"message: \\\"\\($0)\\\"\"\n                },\n                grep.map {\n                    \"grep: \\\"\\($0)\\\"\"\n                },\n                level.map {\n                    \"level: \\($0)\"\n                } ?? \"\",\n                expectedFile.map {\n                    \"expectedFile: \\\"\\($0)\\\"\"\n                },\n                (expectedLine > -1 ? Optional(expectedLine) : nil).map {\n                    \"expectedLine: \\($0)\"\n                },\n            ].compactMap {\n                $0\n            }\n            .joined(separator: \", \")\n\n            let message = \"\"\"\n                Did not find expected log, matching query: \n                    [\\(query)]\n                in captured logs at \\(file):\\(line)\n                \"\"\"\n            if failTest {\n                XCTFail(message, file: (file), line: line)\n            }\n\n            throw LogCaptureError(message: message, file: file, line: line, column: column)\n        }\n    }\n\n    public func grep(_ string: String, metadata metadataQuery: [String: String] = [:]) -> [CapturedLogMessage] {\n        self.logs.filter {\n            guard \"\\($0)\".contains(string) else {\n                // mismatch, exclude it\n                return false\n            }\n\n            if metadataQuery.isEmpty {\n                return true\n            }\n\n            let metas = $0.metadata ?? 
[:]\n            for (queryKey, queryValue) in metadataQuery {\n                if let value = metas[queryKey] {\n                    if queryValue != \"\\(value)\" {\n                        // mismatch, exclude it\n                        return false\n                    }  // ok, continue checking other keys\n                } else {\n                    // key did not exist\n                    return false\n                }\n            }\n\n            return true\n        }\n    }\n}\n\ninternal struct LogCaptureError: Error, CustomStringConvertible {\n    let message: String\n    let file: StaticString\n    let line: UInt\n    let column: UInt\n    var description: String {\n        \"LogCaptureError(\\(message) at \\(file):\\(line) column:\\(column))\"\n    }\n}\n"
  },
  {
    "path": "Tests/SWIMTestKit/TestMetrics.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\n//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Metrics API open source project\n//\n// Copyright (c) 2018-2019 Apple Inc. and the Swift Metrics API project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Metrics API project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport ClusterMembership\nimport XCTest\n\n@testable import CoreMetrics\n@testable import Metrics\n@testable import SWIM\n\n/// Taken directly from swift-metrics's own test package.\n///\n/// Metrics factory which allows inspecting recorded metrics programmatically.\n/// Only intended for tests of the Metrics API itself.\npublic final class TestMetrics: MetricsFactory {\n    private let lock = NSLock()\n\n    public typealias Label = String\n    public typealias Dimensions = String\n\n    public struct FullKey {\n        let label: Label\n        let dimensions: [(String, String)]\n    }\n\n    private var counters = [FullKey: CounterHandler]()\n    private var recorders = [FullKey: RecorderHandler]()\n    private var timers = [FullKey: TimerHandler]()\n\n    public init() {\n        // nothing to do\n    }\n\n    public func makeCounter(label: String, dimensions: [(String, 
String)]) -> CounterHandler {\n        self.make(label: label, dimensions: dimensions, registry: &self.counters, maker: TestCounter.init)\n    }\n\n    public func makeRecorder(label: String, dimensions: [(String, String)], aggregate: Bool) -> RecorderHandler {\n        let maker = { (label: String, dimensions: [(String, String)]) -> RecorderHandler in\n            TestRecorder(label: label, dimensions: dimensions, aggregate: aggregate)\n        }\n        return self.make(label: label, dimensions: dimensions, registry: &self.recorders, maker: maker)\n    }\n\n    public func makeTimer(label: String, dimensions: [(String, String)]) -> TimerHandler {\n        self.make(label: label, dimensions: dimensions, registry: &self.timers, maker: TestTimer.init)\n    }\n\n    private func make<Item>(\n        label: String,\n        dimensions: [(String, String)],\n        registry: inout [FullKey: Item],\n        maker: (String, [(String, String)]) -> Item\n    ) -> Item {\n        self.lock.withLock {\n            let item = maker(label, dimensions)\n            registry[.init(label: label, dimensions: dimensions)] = item\n            return item\n        }\n    }\n\n    public func destroyCounter(_ handler: CounterHandler) {\n        if let testCounter = handler as? TestCounter {\n            self.counters.removeValue(forKey: testCounter.key)\n        }\n    }\n\n    public func destroyRecorder(_ handler: RecorderHandler) {\n        if let testRecorder = handler as? TestRecorder {\n            self.recorders.removeValue(forKey: testRecorder.key)\n        }\n    }\n\n    public func destroyTimer(_ handler: TimerHandler) {\n        if let testTimer = handler as? 
TestTimer {\n            self.timers.removeValue(forKey: testTimer.key)\n        }\n    }\n}\n\nextension TestMetrics.FullKey: Hashable {\n    public func hash(into hasher: inout Hasher) {\n        self.label.hash(into: &hasher)\n        self.dimensions.forEach { dim in\n            dim.0.hash(into: &hasher)\n            dim.1.hash(into: &hasher)\n        }\n    }\n\n    public static func == (lhs: Self, rhs: Self) -> Bool {\n        lhs.label == rhs.label\n            && Dictionary(uniqueKeysWithValues: lhs.dimensions) == Dictionary(uniqueKeysWithValues: rhs.dimensions)\n    }\n}\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: Assertions\n\nextension TestMetrics {\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: Counter\n\n    public func expectCounter(_ metric: Counter) throws -> TestCounter {\n        metric._handler as! TestCounter\n    }\n\n    public func expectCounter(_ label: String, _ dimensions: [(String, String)] = []) throws -> TestCounter {\n        let counter: CounterHandler\n        if let c: CounterHandler = self.counters[.init(label: label, dimensions: dimensions)] {\n            counter = c\n        } else {\n            throw TestMetricsError.missingMetric(label: label, dimensions: [])\n        }\n\n        guard let testCounter = counter as? 
TestCounter else {\n            throw TestMetricsError.illegalMetricType(metric: counter, expected: \"\\(TestCounter.self)\")\n        }\n\n        return testCounter\n    }\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: Gauge\n\n    public func expectGauge(_ metric: Gauge) throws -> TestRecorder {\n        try self.expectRecorder(metric)\n    }\n\n    public func expectGauge(_ label: String, _ dimensions: [(String, String)] = []) throws -> TestRecorder {\n        try self.expectRecorder(label, dimensions)\n    }\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: Recorder\n\n    public func expectRecorder(_ metric: Recorder) throws -> TestRecorder {\n        metric._handler as! TestRecorder\n    }\n\n    public func expectRecorder(_ label: String, _ dimensions: [(String, String)] = []) throws -> TestRecorder {\n        guard let counter = self.recorders[.init(label: label, dimensions: dimensions)] else {\n            throw TestMetricsError.missingMetric(label: label, dimensions: [])\n        }\n        guard let testRecorder = counter as? TestRecorder else {\n            throw TestMetricsError.illegalMetricType(metric: counter, expected: \"\\(TestRecorder.self)\")\n        }\n\n        return testRecorder\n    }\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: Timer\n\n    public func expectTimer(_ metric: Timer) throws -> TestTimer {\n        metric._handler as! 
TestTimer\n    }\n\n    public func expectTimer(_ label: String, _ dimensions: [(String, String)] = []) throws -> TestTimer {\n        guard let counter = self.timers[.init(label: label, dimensions: dimensions)] else {\n            throw TestMetricsError.missingMetric(label: label, dimensions: [])\n        }\n        guard let testTimer = counter as? TestTimer else {\n            throw TestMetricsError.illegalMetricType(metric: counter, expected: \"\\(TestTimer.self)\")\n        }\n\n        return testTimer\n    }\n}\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: Metric type implementations\n\npublic protocol TestMetric {\n    associatedtype Value\n\n    var key: TestMetrics.FullKey { get }\n\n    var lastValue: Value? { get }\n    var last: (Date, Value)? { get }\n}\n\npublic final class TestCounter: TestMetric, CounterHandler, Equatable {\n    public let id: String\n    public let label: String\n    public let dimensions: [(String, String)]\n\n    public var key: TestMetrics.FullKey {\n        .init(label: self.label, dimensions: self.dimensions)\n    }\n\n    let lock = NSLock()\n    private var values = [(Date, Int64)]()\n\n    init(label: String, dimensions: [(String, String)]) {\n        self.id = NSUUID().uuidString\n        self.label = label\n        self.dimensions = dimensions\n    }\n\n    public func increment(by amount: Int64) {\n        self.lock.withLock {\n            self.values.append((Date(), amount))\n        }\n        print(\"adding \\(amount) to \\(self.label)\\(self.dimensions.map { \"\\($0):\\($1)\" })\")\n    }\n\n    public func reset() {\n        self.lock.withLock {\n            self.values = []\n        }\n        print(\"resetting \\(self.label)\")\n    }\n\n    public var lastValue: Int64? 
{\n        self.lock.withLock {\n            values.last?.1\n        }\n    }\n\n    public var totalValue: Int64 {\n        self.lock.withLock {\n            values.map { $0.1 }.reduce(0, +)\n        }\n    }\n\n    public var last: (Date, Int64)? {\n        self.lock.withLock {\n            values.last\n        }\n    }\n\n    public static func == (lhs: TestCounter, rhs: TestCounter) -> Bool {\n        lhs.id == rhs.id\n    }\n}\n\npublic final class TestRecorder: TestMetric, RecorderHandler, Equatable {\n    public let id: String\n    public let label: String\n    public let dimensions: [(String, String)]\n    public let aggregate: Bool\n\n    public var key: TestMetrics.FullKey {\n        .init(label: self.label, dimensions: self.dimensions)\n    }\n\n    let lock = NSLock()\n    private var values = [(Date, Double)]()\n\n    init(label: String, dimensions: [(String, String)], aggregate: Bool) {\n        self.id = NSUUID().uuidString\n        self.label = label\n        self.dimensions = dimensions\n        self.aggregate = aggregate\n    }\n\n    public func record(_ value: Int64) {\n        self.record(Double(value))\n    }\n\n    public func record(_ value: Double) {\n        self.lock.withLock {\n            // this may lose precision but good enough as an example\n            values.append((Date(), Double(value)))\n        }\n        print(\"recording \\(value) in \\(self.label)\\(self.dimensions.map { \"\\($0):\\($1)\" })\")\n    }\n\n    public var lastValue: Double? {\n        self.lock.withLock {\n            values.last?.1\n        }\n    }\n\n    public var last: (Date, Double)? 
{\n        self.lock.withLock {\n            values.last\n        }\n    }\n\n    public static func == (lhs: TestRecorder, rhs: TestRecorder) -> Bool {\n        lhs.id == rhs.id\n    }\n}\n\npublic final class TestTimer: TestMetric, TimerHandler, Equatable {\n    public let id: String\n    public let label: String\n    public var displayUnit: TimeUnit?\n    public let dimensions: [(String, String)]\n\n    public var key: TestMetrics.FullKey {\n        .init(label: self.label, dimensions: self.dimensions)\n    }\n\n    let lock = NSLock()\n    private var _values = [(Date, Int64)]()\n\n    init(label: String, dimensions: [(String, String)]) {\n        self.id = NSUUID().uuidString\n        self.label = label\n        self.displayUnit = nil\n        self.dimensions = dimensions\n    }\n\n    public func preferDisplayUnit(_ unit: TimeUnit) {\n        self.lock.withLock {\n            self.displayUnit = unit\n        }\n    }\n\n    func retrieveValueInPreferredUnit(atIndex i: Int) -> Double {\n        self.lock.withLock {\n            let value = _values[i].1\n            guard let displayUnit = self.displayUnit else {\n                return Double(value)\n            }\n            return Double(value) / Double(displayUnit.scaleFromNanoseconds)\n        }\n    }\n\n    public func recordNanoseconds(_ duration: Int64) {\n        self.lock.withLock {\n            _values.append((Date(), duration))\n        }\n        print(\"recording \\(duration) in \\(self.label)\\(self.dimensions.map { \"\\($0):\\($1)\" })\")\n    }\n\n    public var lastValue: Int64? {\n        self.lock.withLock {\n            _values.last?.1\n        }\n    }\n\n    public var values: [Int64] {\n        self.lock.withLock {\n            _values.map { $0.1 }\n        }\n    }\n\n    public var last: (Date, Int64)? 
{\n        self.lock.withLock {\n            _values.last\n        }\n    }\n\n    public static func == (lhs: TestTimer, rhs: TestTimer) -> Bool {\n        lhs.id == rhs.id\n    }\n}\n\nextension NSLock {\n    fileprivate func withLock<T>(_ body: () -> T) -> T {\n        self.lock()\n        defer {\n            self.unlock()\n        }\n        return body()\n    }\n}\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: Errors\n\npublic enum TestMetricsError: Error {\n    case missingMetric(label: String, dimensions: [(String, String)])\n    case illegalMetricType(metric: Any, expected: String)\n}\n"
  },
  {
    "path": "Tests/SWIMTests/HeapTests.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the SwiftNIO open source project\n//\n// Copyright (c) 2017-2018 Apple Inc. and the SwiftNIO project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.md for the list of SwiftNIO project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport XCTest\n\n@testable import SWIM\n\npublic func getRandomNumbers(count: Int) -> [UInt8] {\n    var values: [UInt8] = .init(repeating: 0, count: count)\n    let fd = open(\"/dev/urandom\", O_RDONLY)\n    precondition(fd >= 0)\n    defer {\n        close(fd)\n    }\n    _ = values.withUnsafeMutableBytes { ptr in\n        read(fd, ptr.baseAddress!, ptr.count)\n    }\n    return values\n}\n\nclass HeapTests: XCTestCase {\n    func testSimple() throws {\n        var h = Heap<Int>(type: .maxHeap)\n        h.append(1)\n        h.append(3)\n        h.append(2)\n        XCTAssertEqual(3, h.removeRoot())\n        XCTAssertTrue(h.checkHeapProperty())\n    }\n\n    func testSortedDesc() throws {\n        var maxHeap = Heap<Int>(type: .maxHeap)\n        var minHeap = Heap<Int>(type: .minHeap)\n\n        let input = [16, 14, 10, 9, 8, 7, 4, 3, 2, 1]\n        input.forEach {\n            minHeap.append($0)\n            maxHeap.append($0)\n            XCTAssertTrue(minHeap.checkHeapProperty())\n            XCTAssertTrue(maxHeap.checkHeapProperty())\n        }\n        var minHeapInputPtr = input.count - 1\n        var maxHeapInputPtr = 0\n        while let maxE = maxHeap.removeRoot(), let minE = minHeap.removeRoot() {\n            XCTAssertEqual(maxE, input[maxHeapInputPtr], \"\\(maxHeap.debugDescription)\")\n            XCTAssertEqual(minE, input[minHeapInputPtr])\n            maxHeapInputPtr += 1\n            minHeapInputPtr -= 1\n            
XCTAssertTrue(minHeap.checkHeapProperty(), \"\\(minHeap.debugDescription)\")\n            XCTAssertTrue(maxHeap.checkHeapProperty())\n        }\n        XCTAssertEqual(-1, minHeapInputPtr)\n        XCTAssertEqual(input.count, maxHeapInputPtr)\n    }\n\n    func testSortedAsc() throws {\n        var maxHeap = Heap<Int>(type: .maxHeap)\n        var minHeap = Heap<Int>(type: .minHeap)\n\n        let input = Array([16, 14, 10, 9, 8, 7, 4, 3, 2, 1].reversed())\n        input.forEach {\n            minHeap.append($0)\n            maxHeap.append($0)\n        }\n        var minHeapInputPtr = 0\n        var maxHeapInputPtr = input.count - 1\n        while let maxE = maxHeap.removeRoot(), let minE = minHeap.removeRoot() {\n            XCTAssertEqual(maxE, input[maxHeapInputPtr])\n            XCTAssertEqual(minE, input[minHeapInputPtr])\n            maxHeapInputPtr -= 1\n            minHeapInputPtr += 1\n        }\n        XCTAssertEqual(input.count, minHeapInputPtr)\n        XCTAssertEqual(-1, maxHeapInputPtr)\n    }\n\n    func testSortedCustom() throws {\n        struct Test: Equatable {\n            let x: Int\n        }\n\n        var maxHeap = Heap(of: Test.self) {\n            $0.x > $1.x\n        }\n        var minHeap = Heap(of: Test.self) {\n            $0.x < $1.x\n        }\n\n        let input = Array([16, 14, 10, 9, 8, 7, 4, 3, 2, 1].reversed().map { Test(x: $0) })\n        input.forEach {\n            minHeap.append($0)\n            maxHeap.append($0)\n        }\n        var minHeapInputPtr = 0\n        var maxHeapInputPtr = input.count - 1\n        while let maxE = maxHeap.removeRoot(), let minE = minHeap.removeRoot() {\n            XCTAssertEqual(maxE, input[maxHeapInputPtr])\n            XCTAssertEqual(minE, input[minHeapInputPtr])\n            maxHeapInputPtr -= 1\n            minHeapInputPtr += 1\n        }\n        XCTAssertEqual(input.count, minHeapInputPtr)\n        XCTAssertEqual(-1, maxHeapInputPtr)\n    }\n\n    func testAddAndRemoveRandomNumbers() 
throws {\n        var maxHeap = Heap<UInt8>(type: .maxHeap)\n        var minHeap = Heap<UInt8>(type: .minHeap)\n        var maxHeapLast = UInt8.max\n        var minHeapLast = UInt8.min\n\n        let N = 100\n\n        for n in getRandomNumbers(count: N) {\n            maxHeap.append(n)\n            minHeap.append(n)\n            XCTAssertTrue(maxHeap.checkHeapProperty(), maxHeap.debugDescription)\n            XCTAssertTrue(minHeap.checkHeapProperty(), maxHeap.debugDescription)\n\n            XCTAssertEqual(Array(minHeap.sorted()), Array(minHeap))\n            XCTAssertEqual(Array(maxHeap.sorted().reversed()), Array(maxHeap))\n        }\n\n        for _ in 0..<N / 2 {\n            var value = maxHeap.removeRoot()!\n            XCTAssertLessThanOrEqual(value, maxHeapLast)\n            maxHeapLast = value\n            value = minHeap.removeRoot()!\n            XCTAssertGreaterThanOrEqual(value, minHeapLast)\n            minHeapLast = value\n\n            XCTAssertTrue(minHeap.checkHeapProperty())\n            XCTAssertTrue(maxHeap.checkHeapProperty())\n\n            XCTAssertEqual(Array(minHeap.sorted()), Array(minHeap))\n            XCTAssertEqual(Array(maxHeap.sorted().reversed()), Array(maxHeap))\n        }\n\n        maxHeapLast = UInt8.max\n        minHeapLast = UInt8.min\n\n        for n in getRandomNumbers(count: N) {\n            maxHeap.append(n)\n            minHeap.append(n)\n            XCTAssertTrue(maxHeap.checkHeapProperty(), maxHeap.debugDescription)\n            XCTAssertTrue(minHeap.checkHeapProperty(), maxHeap.debugDescription)\n        }\n\n        for _ in 0..<N / 2 + N {\n            var value = maxHeap.removeRoot()!\n            XCTAssertLessThanOrEqual(value, maxHeapLast)\n            maxHeapLast = value\n            value = minHeap.removeRoot()!\n            XCTAssertGreaterThanOrEqual(value, minHeapLast)\n            minHeapLast = value\n\n            XCTAssertTrue(minHeap.checkHeapProperty())\n            
XCTAssertTrue(maxHeap.checkHeapProperty())\n        }\n\n        XCTAssertEqual(0, minHeap.underestimatedCount)\n        XCTAssertEqual(0, maxHeap.underestimatedCount)\n    }\n\n    func testRemoveElement() throws {\n        var h = Heap<Int>(type: .maxHeap, storage: [84, 22, 19, 21, 3, 10, 6, 5, 20])!\n        _ = h.remove(value: 10)\n        XCTAssertTrue(h.checkHeapProperty(), \"\\(h.debugDescription)\")\n    }\n}\n"
  },
  {
    "path": "Tests/SWIMTests/SWIMInstanceTests.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2018-2019 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport XCTest\n\n@testable import ClusterMembership\n@testable import SWIM\n\nfinal class SWIMInstanceTests: XCTestCase {\n    let myselfNode = ClusterMembership.Node(protocol: \"test\", host: \"127.0.0.1\", port: 7001, uid: 1111)\n    let secondNode = ClusterMembership.Node(protocol: \"test\", host: \"127.0.0.1\", port: 7002, uid: 2222)\n    let thirdNode = ClusterMembership.Node(protocol: \"test\", host: \"127.0.0.1\", port: 7003, uid: 3333)\n    let fourthNode = ClusterMembership.Node(protocol: \"test\", host: \"127.0.0.1\", port: 7004, uid: 4444)\n    let fifthNode = ClusterMembership.Node(protocol: \"test\", host: \"127.0.0.1\", port: 7005, uid: 5555)\n\n    var myself: TestPeer!\n    var second: TestPeer!\n    var third: TestPeer!\n    var fourth: TestPeer!\n    var fifth: TestPeer!\n\n    override func setUp() {\n        super.setUp()\n        self.myself = TestPeer(node: self.myselfNode)\n        self.second = TestPeer(node: self.secondNode)\n        self.third = TestPeer(node: self.thirdNode)\n        self.fourth = TestPeer(node: self.fourthNode)\n        self.fifth = TestPeer(node: self.fifthNode)\n    }\n\n    override func tearDown() {\n        super.tearDown()\n        self.myself = nil\n        self.second = nil\n        self.third = nil\n        self.fourth = nil\n        self.fifth = nil\n    }\n\n    // ==== 
------------------------------------------------------------------------------------------------------------\n    // MARK: Detecting myself\n\n    func test_notMyself_shouldDetectRemoteVersionOfSelf() {\n        let swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        XCTAssertFalse(swim.notMyself(self.myself))\n    }\n\n    func test_notMyself_shouldDetectRandomNotMyselfActor() {\n        let someone = self.second!\n\n        let swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        XCTAssertTrue(swim.notMyself(someone))\n    }\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: Marking members as various statuses\n\n    func test_mark_shouldNotApplyEqualStatus() throws {\n        let otherPeer = self.second!\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        _ = swim.addMember(otherPeer, status: .suspect(incarnation: 1, suspectedBy: [self.thirdNode]))\n        swim.incrementProtocolPeriod()\n\n        try self.validateMark(\n            swim: &swim,\n            peer: otherPeer,\n            status: .suspect(incarnation: 1, suspectedBy: [self.thirdNode]),\n            shouldSucceed: false\n        )\n\n        XCTAssertEqual(swim.member(for: otherPeer)!.protocolPeriod, 0)\n    }\n\n    func test_mark_shouldApplyNewerStatus() throws {\n        let otherPeer = self.second!\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        _ = swim.addMember(otherPeer, status: .alive(incarnation: 0))\n\n        for i: SWIM.Incarnation in 0...5 {\n            swim.incrementProtocolPeriod()\n            try self.validateMark(\n                swim: &swim,\n                peer: otherPeer,\n                status: .suspect(incarnation: 
SWIM.Incarnation(i), suspectedBy: [self.thirdNode]),\n                shouldSucceed: true\n            )\n            try self.validateMark(\n                swim: &swim,\n                peer: otherPeer,\n                status: .alive(incarnation: SWIM.Incarnation(i + 1)),\n                shouldSucceed: true\n            )\n        }\n\n        XCTAssertEqual(swim.member(for: otherPeer)!.protocolPeriod, 6)\n    }\n\n    func test_mark_shouldNotApplyOlderStatus_suspect() throws {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        // ==== Suspect member -----------------------------------------------------------------------------------------\n        let suspectMember = self.second!\n        _ = swim.addMember(suspectMember, status: .suspect(incarnation: 1, suspectedBy: [self.thirdNode]))\n        swim.incrementProtocolPeriod()\n\n        try self.validateMark(\n            swim: &swim,\n            peer: suspectMember,\n            status: .suspect(incarnation: 0, suspectedBy: [self.thirdNode]),\n            shouldSucceed: false\n        )\n        try self.validateMark(swim: &swim, peer: suspectMember, status: .alive(incarnation: 1), shouldSucceed: false)\n\n        XCTAssertEqual(swim.member(for: suspectMember)!.protocolPeriod, 0)\n    }\n\n    func test_mark_shouldNotApplyOlderStatus_unreachable() throws {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        let unreachableMember = TestPeer(node: self.secondNode)\n        _ = swim.addMember(unreachableMember, status: .unreachable(incarnation: 1))\n        swim.incrementProtocolPeriod()\n\n        try self.validateMark(\n            swim: &swim,\n            peer: unreachableMember,\n            status: .suspect(incarnation: 0, suspectedBy: [self.thirdNode]),\n            shouldSucceed: false\n        )\n        try self.validateMark(\n            swim: &swim,\n           
 peer: unreachableMember,\n            status: .alive(incarnation: 1),\n            shouldSucceed: false\n        )\n\n        XCTAssertEqual(swim.member(for: unreachableMember)!.protocolPeriod, 0)\n    }\n\n    func test_mark_shouldApplyDead() throws {\n        let otherPeer = self.second!\n\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        _ = swim.addMember(otherPeer, status: .suspect(incarnation: 1, suspectedBy: [self.thirdNode]))\n        swim.incrementProtocolPeriod()\n\n        try self.validateMark(swim: &swim, peer: otherPeer, status: .dead, shouldSucceed: true)\n\n        XCTAssertEqual(swim.isMember(otherPeer), false)\n    }\n\n    func test_mark_shouldNotApplyAnyStatusIfAlreadyDead() throws {\n        let otherPeer = self.second!\n\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        _ = swim.addMember(otherPeer, status: .dead)\n        swim.incrementProtocolPeriod()\n\n        try self.validateMark(swim: &swim, peer: otherPeer, status: .alive(incarnation: 99), shouldSucceed: false)\n        try self.validateMark(\n            swim: &swim,\n            peer: otherPeer,\n            status: .suspect(incarnation: 99, suspectedBy: [self.thirdNode]),\n            shouldSucceed: false\n        )\n        try self.validateMark(swim: &swim, peer: otherPeer, status: .dead, shouldSucceed: false)\n\n        XCTAssertEqual(swim.member(for: otherPeer)!.protocolPeriod, 0)\n    }\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: handling ping-req responses\n\n    func test_onPingRequestResponse_allowsSuspectNodeToRefuteSuspicion() {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        let secondPeer = self.second!\n        let thirdPeer = self.third!\n\n        // 
thirdPeer is suspect already...\n        _ = swim.addMember(secondPeer, status: .alive(incarnation: 0))\n        _ = swim.addMember(thirdPeer, status: .suspect(incarnation: 1, suspectedBy: [self.thirdNode]))\n\n        // Imagine: we asked secondPeer to ping thirdPeer\n        // thirdPeer pings secondPeer, gets an ack back -- and there secondPeer had to bump its incarnation number // TODO test for that, using Swim.instance?\n\n        // and now we get an `ack` back, secondPeer claims that thirdPeer is indeed alive!\n        _ = swim.onPingRequestResponse(\n            .ack(target: thirdPeer, incarnation: 2, payload: .none, sequenceNumber: 1),\n            pinged: thirdPeer\n        )\n        // may print the result for debugging purposes if one wanted to\n\n        // thirdPeer should be alive; after all, secondPeer told us so!\n        XCTAssertTrue(swim.member(for: thirdPeer)!.isAlive)\n    }\n\n    func test_onPingRequestResponse_ignoresTooOldRefutations() {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        let secondPeer = self.second!\n        let thirdPeer = self.third!\n\n        // thirdPeer is suspect already...\n        _ = swim.addMember(secondPeer, status: .alive(incarnation: 0))\n        _ = swim.addMember(thirdPeer, status: .suspect(incarnation: 1, suspectedBy: [self.thirdNode]))\n\n        // Imagine: we asked secondPeer to ping thirdPeer\n        // thirdPeer pings secondPeer, yet secondPeer somehow didn't bump its incarnation... 
so we should NOT accept its refutation\n\n        // and now we get an `ack` back, secondPeer claims that thirdPeer is indeed alive!\n        _ = swim.onPingRequestResponse(\n            .ack(target: thirdPeer, incarnation: 1, payload: .none, sequenceNumber: 1),\n            pinged: thirdPeer\n        )\n        // may print the result for debugging purposes if one wanted to\n\n        // thirdPeer should be alive; after all, secondPeer told us so!\n        XCTAssertTrue(swim.member(for: thirdPeer)!.isSuspect)\n    }\n\n    func test_onPingRequestResponse_storeIndividualSuspicions() throws {\n        var settings: SWIM.Settings = .init()\n        settings.lifeguard.maxIndependentSuspicions = 10\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: settings, myself: self.myself)\n\n        _ = swim.addMember(self.second, status: .suspect(incarnation: 1, suspectedBy: [self.secondNode]))\n\n        _ = swim.onPingRequestResponse(\n            .timeout(target: self.second, pingRequestOrigin: nil, timeout: .milliseconds(800), sequenceNumber: 1),\n            pinged: self.second\n        )\n        let resultStatus = swim.member(for: self.second)!.status\n        if case .suspect(_, let confirmations) = resultStatus {\n            XCTAssertEqual(confirmations, [secondNode, myselfNode])\n        } else {\n            XCTFail(\"Expected `.suspected(_, Set(0,1))`, got \\(resultStatus)\")\n            return\n        }\n    }\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: receive a ping and reply to it\n\n    func test_onPing_shouldOfferAckMessageWithMyselfReference() throws {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        _ = swim.addMember(self.second, status: .alive(incarnation: 0))\n\n        let directive = swim.onPing(pingOrigin: self.second, payload: .none, sequenceNumber: 0).first!\n 
       switch directive {\n        case .sendAck(_, let pinged, _, _, _):\n            XCTAssertEqual(pinged.node, self.myselfNode)  // which was added as myself to this swim instance\n        case let other:\n            XCTFail(\"Expected .sendAck, but got \\(other)\")\n        }\n    }\n\n    func test_onPing_withAlive_shouldReplyWithAlive_withIncrementedIncarnation() throws {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        // from our perspective, all nodes are alive...\n        _ = swim.addMember(self.second, status: .alive(incarnation: 0))\n\n        // Imagine: thirdPeer pings us, it suspects us (!)\n        // we (p1) receive the ping and want to refute the suspicion, we are Still Alive:\n        // (thirdPeer has heard from someone that we are suspect in incarnation 10 (for some silly reason))\n        let res = swim.onPing(pingOrigin: self.third, payload: .none, sequenceNumber: 0).first!\n\n        switch res {\n        case .sendAck(_, _, let incarnation, _, _):\n            // did not have to increment its incarnation number:\n            XCTAssertEqual(incarnation, 0)\n        case let reply:\n            XCTFail(\"Expected .sendAck ping response, but got \\(reply)\")\n        }\n    }\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: Detecting when a change is \"effective\"\n\n    func test_MarkedDirective_isEffectiveChange() {\n        let p = self.myself!\n\n        XCTAssertTrue(\n            SWIM.MemberStatusChangedEvent(\n                previousStatus: nil,\n                member: SWIM.Member(peer: p, status: .alive(incarnation: 1), protocolPeriod: 1)\n            )\n            .isReachabilityChange\n        )\n        XCTAssertTrue(\n            SWIM.MemberStatusChangedEvent(\n                previousStatus: nil,\n                member: SWIM.Member(\n                    peer: p,\n  
                  status: .suspect(incarnation: 1, suspectedBy: [self.thirdNode]),\n                    protocolPeriod: 1\n                )\n            )\n            .isReachabilityChange\n        )\n        XCTAssertTrue(\n            SWIM.MemberStatusChangedEvent(\n                previousStatus: nil,\n                member: SWIM.Member(peer: p, status: .unreachable(incarnation: 1), protocolPeriod: 1)\n            )\n            .isReachabilityChange\n        )\n        XCTAssertTrue(\n            SWIM.MemberStatusChangedEvent(\n                previousStatus: nil,\n                member: SWIM.Member(peer: p, status: .dead, protocolPeriod: 1)\n            )\n            .isReachabilityChange\n        )\n\n        XCTAssertFalse(\n            SWIM.MemberStatusChangedEvent(\n                previousStatus: .alive(incarnation: 1),\n                member: SWIM.Member(peer: p, status: .alive(incarnation: 2), protocolPeriod: 1)\n            )\n            .isReachabilityChange\n        )\n        XCTAssertFalse(\n            SWIM.MemberStatusChangedEvent(\n                previousStatus: .alive(incarnation: 1),\n                member: SWIM.Member(\n                    peer: p,\n                    status: .suspect(incarnation: 1, suspectedBy: [self.thirdNode]),\n                    protocolPeriod: 1\n                )\n            )\n            .isReachabilityChange\n        )\n        XCTAssertTrue(\n            SWIM.MemberStatusChangedEvent(\n                previousStatus: .alive(incarnation: 1),\n                member: SWIM.Member(peer: p, status: .unreachable(incarnation: 1), protocolPeriod: 1)\n            )\n            .isReachabilityChange\n        )\n        XCTAssertTrue(\n            SWIM.MemberStatusChangedEvent(\n                previousStatus: .alive(incarnation: 1),\n                member: SWIM.Member(peer: p, status: .dead, protocolPeriod: 1)\n            )\n            .isReachabilityChange\n        )\n\n        XCTAssertFalse(\n            
SWIM.MemberStatusChangedEvent(\n                previousStatus: .suspect(incarnation: 1, suspectedBy: [self.thirdNode]),\n                member: SWIM.Member(peer: p, status: .alive(incarnation: 2), protocolPeriod: 1)\n            )\n            .isReachabilityChange\n        )\n        XCTAssertFalse(\n            SWIM.MemberStatusChangedEvent(\n                previousStatus: .suspect(incarnation: 1, suspectedBy: [self.thirdNode]),\n                member: SWIM.Member(\n                    peer: p,\n                    status: .suspect(incarnation: 2, suspectedBy: [self.thirdNode]),\n                    protocolPeriod: 1\n                )\n            )\n            .isReachabilityChange\n        )\n        XCTAssertTrue(\n            SWIM.MemberStatusChangedEvent(\n                previousStatus: .suspect(incarnation: 1, suspectedBy: [self.thirdNode]),\n                member: SWIM.Member(peer: p, status: .unreachable(incarnation: 2), protocolPeriod: 1)\n            )\n            .isReachabilityChange\n        )\n        XCTAssertTrue(\n            SWIM.MemberStatusChangedEvent(\n                previousStatus: .suspect(incarnation: 1, suspectedBy: [self.thirdNode]),\n                member: SWIM.Member(peer: p, status: .dead, protocolPeriod: 1)\n            )\n            .isReachabilityChange\n        )\n\n        XCTAssertTrue(\n            SWIM.MemberStatusChangedEvent(\n                previousStatus: .unreachable(incarnation: 1),\n                member: SWIM.Member(peer: p, status: .alive(incarnation: 2), protocolPeriod: 1)\n            )\n            .isReachabilityChange\n        )\n        XCTAssertTrue(\n            SWIM.MemberStatusChangedEvent(\n                previousStatus: .unreachable(incarnation: 1),\n                member: SWIM.Member(\n                    peer: p,\n                    status: .suspect(incarnation: 2, suspectedBy: [self.thirdNode]),\n                    protocolPeriod: 1\n                )\n            )\n            
.isReachabilityChange\n        )\n        XCTAssertFalse(\n            SWIM.MemberStatusChangedEvent(\n                previousStatus: .unreachable(incarnation: 1),\n                member: SWIM.Member(peer: p, status: .unreachable(incarnation: 2), protocolPeriod: 1)\n            )\n            .isReachabilityChange\n        )\n        XCTAssertFalse(\n            SWIM.MemberStatusChangedEvent(\n                previousStatus: .unreachable(incarnation: 1),\n                member: SWIM.Member(peer: p, status: .dead, protocolPeriod: 1)\n            )\n            .isReachabilityChange\n        )\n\n        // those are illegal, but even IF they happened at least we'd never bubble them up to high level\n        // moving from .dead to any other state is illegal and should assert // TODO: check\n        XCTAssertFalse(\n            SWIM.MemberStatusChangedEvent(\n                previousStatus: .dead,\n                member: SWIM.Member(peer: p, status: .dead, protocolPeriod: 1)\n            )\n            .isReachabilityChange\n        )\n    }\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: handling gossip about the receiving node\n\n    func test_onGossipPayload_myself_withAlive() throws {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n        let currentIncarnation = swim.incarnation\n\n        let myselfMember = swim.member\n\n        let directives = swim.onGossipPayload(about: myselfMember)\n\n        XCTAssertEqual(swim.incarnation, currentIncarnation)\n\n        switch directives.first {\n        case .applied:\n            ()  // ok\n        default:\n            XCTFail(\"Expected `.applied()`, \\(optional: directives)\")\n        }\n    }\n\n    func test_onGossipPayload_myself_withSuspectAndSameIncarnation_shouldIncrementIncarnation() throws {\n        var swim = SWIM.Instance<TestPeer, TestPeer, 
TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n        let currentIncarnation = swim.incarnation\n\n        var myselfMember = swim.member\n        myselfMember.status = .suspect(incarnation: currentIncarnation, suspectedBy: [self.thirdNode])\n\n        let directives = swim.onGossipPayload(about: myselfMember)\n\n        XCTAssertEqual(swim.incarnation, currentIncarnation + 1)\n\n        switch directives.first {\n        case .applied:\n            ()\n        default:\n            XCTFail(\"Expected `.applied(warning: nil)`, \\(optional: directives)\")\n        }\n    }\n\n    func test_onGossipPayload_myself_withSuspectAndLowerIncarnation_shouldNotIncrementIncarnation() throws {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n        var currentIncarnation = swim.incarnation\n\n        var myselfMember = swim.member\n\n        // necessary to increment incarnation\n        myselfMember.status = .suspect(incarnation: currentIncarnation, suspectedBy: [self.thirdNode])\n        _ = swim.onGossipPayload(about: myselfMember)\n\n        currentIncarnation = swim.incarnation\n\n        myselfMember.status = .suspect(incarnation: currentIncarnation - 1, suspectedBy: [self.thirdNode])  // purposefully \"previous\"\n        let directives = swim.onGossipPayload(about: myselfMember)\n\n        XCTAssertEqual(swim.incarnation, currentIncarnation)\n\n        switch directives.first {\n        case .applied(nil):\n            ()\n        default:\n            XCTFail(\"Expected [ignored(level: nil, message: nil)], got \\(directives)\")\n        }\n    }\n\n    func test_onGossipPayload_myself_withSuspectAndHigherIncarnation_shouldNotIncrementIncarnation() throws {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n        let currentIncarnation = swim.incarnation\n\n        var myselfMember = swim.member\n\n        myselfMember.status = 
.suspect(incarnation: currentIncarnation + 6, suspectedBy: [self.thirdNode])\n        let directives = swim.onGossipPayload(about: myselfMember)\n\n        XCTAssertEqual(swim.incarnation, currentIncarnation)\n\n        switch directives.first {\n        case .applied(nil):\n            ()\n        default:\n            XCTFail(\"Expected `.none(message)`, got \\(directives)\")\n        }\n    }\n\n    func test_onGossipPayload_other_withDead() throws {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n        let other = self.second!\n\n        _ = swim.addMember(other, status: .alive(incarnation: 0))\n\n        var otherMember = swim.member(for: other)!\n        otherMember.status = .dead\n        let directives = swim.onGossipPayload(about: otherMember)\n\n        switch directives.first {\n        case .applied(.some(let change)) where change.status.isDead:\n            XCTAssertEqual(change.member, otherMember)\n        default:\n            XCTFail(\"Expected `.applied(.some(change to dead))`, got \\(directives)\")\n        }\n    }\n\n    func test_onGossipPayload_myself_withUnreachable_unreachabilityEnabled() throws {\n        var settings = SWIM.Settings()\n        settings.unreachability = .enabled\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: settings, myself: self.myself)\n\n        var myselfMember = swim.member\n        myselfMember.status = .unreachable(incarnation: 1)\n        let directives = swim.onGossipPayload(about: myselfMember)\n\n        let myMember = swim.member\n        // we never accept other telling us about \"our future\" this is highly suspect!\n        // only we can be the origin of incarnation numbers after all.\n        XCTAssertEqual(myMember.status, .alive(incarnation: 0))\n\n        switch directives.first {\n        case .applied(nil):\n            ()\n        default:\n            XCTFail(\"Expected `.applied(_)`, got: 
\\(String(reflecting: directives))\")\n        }\n    }\n\n    func test_onGossipPayload_other_withUnreachable_unreachabilityEnabled() throws {\n        var settings = SWIM.Settings()\n        settings.unreachability = .enabled\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: settings, myself: self.myself)\n        let other = self.second!\n\n        _ = swim.addMember(other, status: .alive(incarnation: 0))\n\n        var otherMember = swim.member(for: other)!\n        otherMember.status = .unreachable(incarnation: 1)\n        let directives = swim.onGossipPayload(about: otherMember)\n\n        switch directives.first {\n        case .applied(.some(let change)) where change.status.isUnreachable:\n            XCTAssertEqual(change.member, otherMember)\n        default:\n            XCTFail(\"Expected `.applied(.some(change to unreachable))`, got: \\(String(reflecting: directives))\")\n        }\n    }\n\n    func test_onGossipPayload_myself_withOldUnreachable_unreachabilityEnabled() throws {\n        var settings = SWIM.Settings()\n        settings.unreachability = .enabled\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: settings, myself: self.myself)\n        swim.incrementProtocolPeriod()  // @1\n\n        var myselfMember = swim.member\n        myselfMember.status = .unreachable(incarnation: 0)\n        let directives = swim.onGossipPayload(about: myselfMember)\n\n        XCTAssertEqual(swim.member.status, .alive(incarnation: 1))  // equal to the incremented @1\n\n        switch directives.first {\n        case .applied(nil):\n            ()  // good\n        default:\n            XCTFail(\n                \"Expected `.ignored`, since the unreachable information is too old to matter anymore, got: \\(optional: directives)\"\n            )\n        }\n    }\n\n    func test_onGossipPayload_other_withOldUnreachable_unreachabilityEnabled() throws {\n        var settings = SWIM.Settings()\n        
settings.unreachability = .enabled\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: settings, myself: self.myself)\n        let other = self.second!\n\n        _ = swim.addMember(other, status: .alive(incarnation: 10))\n\n        var otherMember = swim.member(for: other)!\n        otherMember.status = .unreachable(incarnation: 1)  // too old, we're already alive in 10\n        let directives = swim.onGossipPayload(about: otherMember)\n\n        if directives.isEmpty {\n            ()  // good\n        } else {\n            XCTFail(\n                \"Expected `[]]`, since the unreachable information is too old to matter anymore, got: \\(optional: directives)\"\n            )\n        }\n    }\n\n    func test_onGossipPayload_myself_withUnreachable_unreachabilityDisabled() throws {\n        var settings = SWIM.Settings()\n        settings.unreachability = .disabled\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: settings, myself: self.myself)\n\n        var myselfMember = swim.member\n        myselfMember.status = .unreachable(incarnation: 1)\n\n        let directives = swim.onGossipPayload(about: myselfMember)\n\n        // we never accept other peers causing us to become some other status,\n        // we always view ourselves as reachable (alive) until dead.\n        let myMember = swim.member\n        XCTAssertEqual(myMember.status, .alive(incarnation: 0))\n\n        switch directives.first {\n        case .applied(nil):\n            ()  // ok, unreachability was disabled after all, so we completely ignore it\n        default:\n            XCTFail(\"Expected `.applied(_, .warning, ...)`, got: \\(directives)\")\n        }\n    }\n\n    func test_onGossipPayload_other_withUnreachable_unreachabilityDisabled() throws {\n        var settings = SWIM.Settings()\n        settings.unreachability = .disabled\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: settings, myself: self.myself)\n        
let other = self.second!\n\n        _ = swim.addMember(other, status: .alive(incarnation: 0))\n\n        var otherMember = swim.member(for: other)!\n        otherMember.status = .unreachable(incarnation: 1)\n        // we receive an unreachability event, but we do not use this state, it should be automatically promoted to dead,\n        // other nodes may use unreachability e.g. when we're rolling out a reconfiguration, but they can't force\n        // us to keep those statuses of members, thus we always promote it to dead.\n        let directives = swim.onGossipPayload(about: otherMember)\n\n        switch directives.first {\n        case .applied(.some(let change)) where change.status.isDead:\n            otherMember.status = .dead  // with unreachability disabled, we automatically promoted it to .dead\n            XCTAssertEqual(change.member, otherMember)\n        default:\n            XCTFail(\"Expected `.applied(.some(change to dead))`, got: \\(directives)\")\n        }\n    }\n\n    func test_onGossipPayload_other_withNewSuspicion_shouldStoreIndividualSuspicions() throws {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n        let other = self.second!\n\n        _ = swim.addMember(other, status: .suspect(incarnation: 0, suspectedBy: [self.thirdNode]))\n        var otherMember = swim.member(for: other)!\n        otherMember.status = .suspect(incarnation: 0, suspectedBy: [self.secondNode])\n        let directives = swim.onGossipPayload(about: otherMember)\n        if case .applied(.some(let change)) = directives.first,\n            case .suspect(_, let confirmations) = change.status\n        {\n            XCTAssertEqual(confirmations.count, 2)\n            XCTAssertTrue(confirmations.contains(secondNode), \"expected \\(confirmations) to contain \\(secondNode)\")\n            XCTAssertTrue(confirmations.contains(thirdNode), \"expected \\(confirmations) to contain \\(thirdNode)\")\n        } else 
{\n            XCTFail(\"Expected `.applied(.some(suspect with multiple suspectedBy))`, got \\(directives)\")\n        }\n    }\n\n    func test_onGossipPayload_other_shouldNotApplyGossip_whenHaveEnoughSuspectedBy() throws {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n        let other = self.second!\n\n        let saturatedSuspectedByList = (1...swim.settings.lifeguard.maxIndependentSuspicions).map {\n            Node(protocol: \"test\", host: \"test\", port: 12345, uid: UInt64($0))\n        }\n\n        _ = swim.addMember(other, status: .suspect(incarnation: 0, suspectedBy: Set(saturatedSuspectedByList)))\n\n        var otherMember = swim.member(for: other)!\n        otherMember.status = .suspect(incarnation: 0, suspectedBy: [self.thirdNode])\n        let directives = swim.onGossipPayload(about: otherMember)\n        guard case [] = directives else {\n            XCTFail(\"Expected `[]`, got \\(String(reflecting: directives))\")\n            return\n        }\n    }\n\n    func test_onGossipPayload_other_shouldNotExceedMaximumSuspectedBy() throws {\n        var settings: SWIM.Settings = .init()\n        settings.lifeguard.maxIndependentSuspicions = 3\n\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: settings, myself: self.myself)\n        let other = self.second!\n\n        _ = swim.addMember(other, status: .suspect(incarnation: 0, suspectedBy: [self.thirdNode, self.secondNode]))\n\n        var otherMember = swim.member(for: other)!\n        otherMember.status = .suspect(incarnation: 0, suspectedBy: [self.thirdNode, self.fourthNode])\n        let directives = swim.onGossipPayload(about: otherMember)\n        if case .applied(.some(let change)) = directives.first,\n            case .suspect(_, let confirmation) = change.status\n        {\n            XCTAssertEqual(confirmation.count, swim.settings.lifeguard.maxIndependentSuspicions)\n        } else {\n            
XCTFail(\n                \"Expected `.applied(.some(suspectedBy)) where suspectedBy.count = maxIndependentSuspicions`, got \\(directives)\"\n            )\n        }\n    }\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: increment-ing counters\n\n    func test_incrementProtocolPeriod_shouldIncrementTheProtocolPeriodNumberByOne() {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        for i in 0..<10 {\n            XCTAssertEqual(swim.protocolPeriod, UInt64(i))\n            swim.incrementProtocolPeriod()\n        }\n    }\n\n    func test_members_shouldContainAllAddedMembers() {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        let secondPeer = self.second!\n        let thirdPeer = self.third!\n\n        _ = swim.addMember(self.myself, status: .alive(incarnation: 0))\n        _ = swim.addMember(secondPeer, status: .alive(incarnation: 0))\n        _ = swim.addMember(thirdPeer, status: .alive(incarnation: 0))\n\n        XCTAssertTrue(swim.isMember(self.myself))\n        XCTAssertTrue(swim.isMember(secondPeer))\n        XCTAssertTrue(swim.isMember(thirdPeer))\n\n        XCTAssertEqual(swim.allMemberCount, 3)\n        XCTAssertEqual(swim.notDeadMemberCount, 3)\n        XCTAssertEqual(swim.otherMemberCount, 2)\n    }\n\n    func test_isMember_shouldAllowCheckingWhenNotKnowingSpecificUID() {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        _ = swim.addMember(self.myself, status: .alive(incarnation: 0))\n        _ = swim.addMember(self.second, status: .alive(incarnation: 0))\n\n        XCTAssertTrue(swim.isMember(self.myself))\n        XCTAssertTrue(swim.isMember(self.myself, ignoreUID: true))\n\n        XCTAssertTrue(swim.isMember(TestPeer(node: 
self.secondNode.withoutUID), ignoreUID: true))\n        XCTAssertFalse(swim.isMember(TestPeer(node: self.secondNode.withoutUID)))\n\n        XCTAssertFalse(swim.isMember(TestPeer(node: self.thirdNode.withoutUID), ignoreUID: true))\n        XCTAssertFalse(swim.isMember(TestPeer(node: self.thirdNode.withoutUID)))\n    }\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: Modifying LHA-probe multiplier\n\n    func test_onPingRequestResponse_incrementLHAMultiplier_whenMissedNack() {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        let secondPeer = self.second!\n\n        _ = swim.addMember(secondPeer, status: .alive(incarnation: 0))\n\n        XCTAssertEqual(swim.localHealthMultiplier, 0)\n        _ = swim.onEveryPingRequestResponse(\n            .timeout(target: secondPeer, pingRequestOrigin: nil, timeout: .milliseconds(300), sequenceNumber: 1),\n            pinged: secondPeer\n        )\n        XCTAssertEqual(swim.localHealthMultiplier, 1)\n    }\n\n    func test_onPingRequestResponse_handlesNacksCorrectly() {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        _ = swim.addMember(self.second, status: .alive(incarnation: 0))\n        _ = swim.addMember(self.third, status: .alive(incarnation: 0))\n        _ = swim.addMember(self.fourth, status: .suspect(incarnation: 0, suspectedBy: [self.third.node]))\n\n        XCTAssertEqual(swim.localHealthMultiplier, 0)\n        // pretend first sends:\n        //   - second.pingRequest(fourth)\n        //   - third.pingRequest(fourth)\n\n        // expect 2 nacks:\n\n        // get nack from second 1/2\n        _ = swim.onPingRequestResponse(\n            .timeout(target: self.fourth, pingRequestOrigin: nil, timeout: .nanoseconds(1), sequenceNumber: 2),\n            pinged: self.fourth\n        )\n      
  XCTAssertEqual(swim.localHealthMultiplier, 0)\n        // get nack from third 2/2\n        _ = swim.onPingRequestResponse(\n            .timeout(target: self.fourth, pingRequestOrigin: nil, timeout: .nanoseconds(1), sequenceNumber: 3),\n            pinged: self.fourth\n        )\n        XCTAssertEqual(swim.localHealthMultiplier, 0)\n    }\n\n    func test_onPingRequestResponse_handlesMissingNacksCorrectly() {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        _ = swim.addMember(self.second, status: .alive(incarnation: 0))\n        _ = swim.addMember(self.third, status: .alive(incarnation: 0))\n        _ = swim.addMember(self.fourth, status: .suspect(incarnation: 0, suspectedBy: [self.third.node]))\n\n        XCTAssertEqual(swim.localHealthMultiplier, 0)\n        // pretend first sends:\n        //   - second.pingRequest(fourth)\n        //   - third.pingRequest(fourth)\n\n        // timeout, no nack from third\n        _ = swim.onEveryPingRequestResponse(\n            .timeout(target: self.fourth, pingRequestOrigin: nil, timeout: .nanoseconds(1), sequenceNumber: 2),\n            pinged: self.fourth\n        )\n        XCTAssertEqual(swim.localHealthMultiplier, 1)\n        // timeout, no nack from third\n        _ = swim.onEveryPingRequestResponse(\n            .timeout(target: self.fourth, pingRequestOrigin: nil, timeout: .nanoseconds(1), sequenceNumber: 2),\n            pinged: self.fourth\n        )\n        XCTAssertEqual(swim.localHealthMultiplier, 2)\n\n        // all probes failed, thus the \"main\" one as well:\n        _ = swim.onPingRequestResponse(\n            .timeout(target: self.fourth, pingRequestOrigin: nil, timeout: .nanoseconds(1), sequenceNumber: 2),\n            pinged: self.fourth\n        )\n        // this was already accounted for in the onEveryPingRequestResponse\n        XCTAssertEqual(swim.localHealthMultiplier, 2)\n    }\n\n    // TODO: handle ack after nack 
scenarios; this needs modifications in SWIMNIO to handle these as well\n\n    func test_onPingRequestResponse_decrementLHAMultiplier_whenGotAck() {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        let secondPeer = self.second!\n\n        _ = swim.addMember(secondPeer, status: .alive(incarnation: 0))\n        swim.localHealthMultiplier = 1\n        _ = swim.onPingAckResponse(\n            target: secondPeer,\n            incarnation: 0,\n            payload: .none,\n            pingRequestOrigin: nil,\n            pingRequestSequenceNumber: nil,\n            sequenceNumber: 0\n        )\n        XCTAssertEqual(swim.localHealthMultiplier, 0)\n    }\n\n    func test_onPingAckResponse_forwardAckToOriginWithRightSequenceNumber_onAckFromTarget() {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        _ = swim.addMember(self.second, status: .alive(incarnation: 12))\n        _ = swim.addMember(self.third, status: .alive(incarnation: 33))\n\n        // let's pretend `third` asked us to ping `second`, and we get the ack back:\n        let pingRequestOrigin = self.third!\n        let pingRequestSequenceNumber: UInt32 = 1212\n\n        let directives = swim.onPingAckResponse(\n            target: self.second,\n            incarnation: 12,\n            payload: .none,\n            pingRequestOrigin: pingRequestOrigin,\n            pingRequestSequenceNumber: pingRequestSequenceNumber,\n            sequenceNumber: 2  // the sequence number that we used to send the `ping` with\n        )\n\n        XCTAssertTrue(\n            directives.contains {\n                switch $0 {\n                case .sendAck(let peer, let acknowledging, let target, let incarnation, _):\n                    XCTAssertEqual(peer.node, pingRequestOrigin.node)\n                    XCTAssertEqual(acknowledging, pingRequestSequenceNumber)\n                    
XCTAssertEqual(self.second.node, target.node)\n                    XCTAssertEqual(incarnation, 12)\n                    return true\n                default:\n                    return false\n                }\n            },\n            \"directives should contain .sendAck\"\n        )\n    }\n\n    func test_onPingAckResponse_sendNackWithRightSequenceNumberToOrigin_onTimeout() {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        _ = swim.addMember(self.second, status: .alive(incarnation: 12))\n        _ = swim.addMember(self.third, status: .alive(incarnation: 33))\n\n        // let's pretend `third` asked us to ping `second`\n        let pingRequestOrigin = self.third!\n        let pingRequestSequenceNumber: UInt32 = 1212\n\n        // and we get a timeout (so we should send a nack to the origin)\n        let directives = swim.onPingResponseTimeout(\n            target: self.second,\n            timeout: .seconds(1),\n            pingRequestOrigin: pingRequestOrigin,\n            pingRequestSequenceNumber: pingRequestSequenceNumber\n        )\n\n        XCTAssertTrue(\n            directives.contains {\n                switch $0 {\n                case .sendNack(let peer, let acknowledging, let target):\n                    XCTAssertEqual(peer.node, pingRequestOrigin.node)\n                    XCTAssertEqual(acknowledging, pingRequestSequenceNumber)\n                    XCTAssertEqual(self.second.node, target.node)\n                    return true\n                default:\n                    return false\n                }\n            },\n            \"directives should contain .sendNack\"\n        )\n    }\n\n    func test_onPingRequestResponse_notIncrementLHAMultiplier_whenSeeOldSuspicion_onGossip() {\n        let p1 = self.myself!\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n        // first suspicion is for current 
incarnation, should increase LHA counter\n        _ = swim.onGossipPayload(\n            about: SWIM.Member(\n                peer: p1,\n                status: .suspect(incarnation: 0, suspectedBy: [self.thirdNode]),\n                protocolPeriod: 0\n            )\n        )\n        XCTAssertEqual(swim.localHealthMultiplier, 1)\n        // second suspicion is for a stale incarnation, should ignore\n        _ = swim.onGossipPayload(\n            about: SWIM.Member(\n                peer: p1,\n                status: .suspect(incarnation: 0, suspectedBy: [self.thirdNode]),\n                protocolPeriod: 0\n            )\n        )\n        XCTAssertEqual(swim.localHealthMultiplier, 1)\n    }\n\n    func test_onPingRequestResponse_incrementLHAMultiplier_whenRefuteSuspicion_onGossip() {\n        let p1 = self.myself!\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        _ = swim.onGossipPayload(\n            about: SWIM.Member(\n                peer: p1,\n                status: .suspect(incarnation: 0, suspectedBy: [self.thirdNode]),\n                protocolPeriod: 0\n            )\n        )\n        XCTAssertEqual(swim.localHealthMultiplier, 1)\n    }\n\n    func test_onPingRequestResponse_dontChangeLHAMultiplier_whenGotNack() {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        let secondPeer = self.second!\n\n        _ = swim.addMember(secondPeer, status: .alive(incarnation: 0))\n        swim.localHealthMultiplier = 1\n\n        _ = swim.onEveryPingRequestResponse(.nack(target: secondPeer, sequenceNumber: 1), pinged: secondPeer)\n        XCTAssertEqual(swim.localHealthMultiplier, 1)\n    }\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: Selecting members to ping\n\n    func 
test_nextMemberToPing_shouldReturnEachMemberOnceBeforeRepeatingAndKeepOrder() throws {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        let memberCount = 10\n        var members: Set<TestPeer> = []\n        for i in 1...memberCount {\n            var node = self.myselfNode\n            node.port = 8000 + i\n            let peer = TestPeer(node: node)\n            members.insert(peer)\n            _ = swim.addMember(peer, status: .alive(incarnation: 0))\n        }\n\n        var seenNodes: [Node] = []\n        for _ in 1...memberCount {\n            guard let member = swim.nextPeerToPing() else {\n                XCTFail(\"Could not fetch member to ping\")\n                return\n            }\n\n            seenNodes.append(member.node)\n            members = members.filter {\n                $0.node != member.node\n            }\n        }\n\n        XCTAssertTrue(members.isEmpty, \"all members should have been selected at least once\")\n\n        // should loop around and we should encounter all the same members now\n        for _ in 1...memberCount {\n            guard let member = swim.nextPeerToPing() else {\n                XCTFail(\"Could not fetch member to ping\")\n                return\n            }\n\n            XCTAssertEqual(seenNodes.removeFirst(), member.node)\n        }\n    }\n\n    func test_addMember_shouldAddAMemberWithTheSpecifiedStatusAndCurrentProtocolPeriod() {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n        let status: SWIM.Status = .alive(incarnation: 1)\n\n        swim.incrementProtocolPeriod()\n        swim.incrementProtocolPeriod()\n        swim.incrementProtocolPeriod()\n\n        XCTAssertFalse(swim.isMember(self.second))\n        _ = swim.addMember(self.second, status: status)\n\n        XCTAssertTrue(swim.isMember(self.second))\n        let member = swim.member(for: self.second)!\n    
    XCTAssertEqual(member.protocolPeriod, swim.protocolPeriod)\n        XCTAssertEqual(member.status, status)\n    }\n\n    func test_addMember_shouldNotAddLocalNodeForPinging() {\n        let otherPeer = self.second!\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: .init(), myself: otherPeer)\n\n        XCTAssertTrue(swim.isMember(otherPeer))\n        XCTAssertNil(swim.nextPeerToPing())\n    }\n\n    func test_addMember_shouldNotAddPeerWithoutUID() {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: .init(), myself: self.myself)\n\n        let other = TestPeer(node: .init(protocol: \"test\", host: \"127.0.0.1\", port: 111, uid: nil))\n        let directives = swim.addMember(other, status: .alive(incarnation: 0))\n        XCTAssertEqual(directives.count, 0)\n        XCTAssertFalse(swim.isMember(other))\n        XCTAssertNil(swim.nextPeerToPing())\n    }\n\n    func test_addMember_shouldReplaceMemberIfDifferentUID() {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: .init(), myself: self.myself)\n        _ = swim.addMember(self.second, status: .alive(incarnation: 0))\n        XCTAssertTrue(swim.isMember(self.second))\n\n        let restartedSecond = TestPeer(node: self.secondNode)\n        restartedSecond.swimNode.uid = self.second.node.uid! 
* 2\n\n        let directives = swim.addMember(restartedSecond, status: .alive(incarnation: 0))\n\n        switch directives.first {\n        case .previousHostPortMemberConfirmedDead(let event):\n            XCTAssertEqual(event.previousStatus, SWIM.Status.alive(incarnation: 0))\n            XCTAssertEqual(event.member.peer, self.second)\n        default:\n            XCTFail(\"Expected replacement directive, was: \\(optional: directives.first), in: \\(directives)\")\n        }\n        switch directives.dropFirst().first {\n        case .added(let addedMember):\n            XCTAssertEqual(addedMember.node, restartedSecond.node)\n            XCTAssertEqual(addedMember.status, SWIM.Status.alive(incarnation: 0))\n        default:\n            XCTFail(\"Expected .added as directive, was: \\(optional: directives.dropFirst().first), in: \\(directives)\")\n        }\n\n        XCTAssertTrue(swim.isMember(restartedSecond))\n        XCTAssertFalse(swim.isMember(self.second))\n\n        XCTAssertTrue(swim.isMember(self.myself))\n    }\n\n    func test_nextMemberToPingRequest() {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        let ds1 = swim.addMember(self.second, status: .alive(incarnation: 0))\n        XCTAssertEqual(ds1.count, 1)\n        guard case .added(let firstMember) = ds1.first else {\n            return XCTFail(\"Expected to successfully add peer, was: \\(ds1)\")\n        }\n        let ds2 = swim.addMember(self.third!, status: .alive(incarnation: 0))\n        XCTAssertEqual(ds2.count, 1)\n        guard case .added(let secondMember) = ds2.first else {\n            return XCTFail(\"Expected to successfully add peer, was: \\(ds2)\")\n        }\n        let ds3 = swim.addMember(self.fourth!, status: .alive(incarnation: 0))\n        XCTAssertEqual(ds3.count, 1)\n        guard case .added(let thirdMember) = ds3.first else {\n            return XCTFail(\"Expected to successfully add peer, was: 
\\(ds3)\")\n        }\n\n        let membersToPing = swim.membersToPingRequest(target: self.fifth!)\n        XCTAssertEqual(membersToPing.count, 3)\n\n        XCTAssertTrue(membersToPing.contains(firstMember))\n        XCTAssertTrue(membersToPing.contains(secondMember))\n        XCTAssertTrue(membersToPing.contains(thirdMember))\n    }\n\n    func test_member_shouldReturnTheLastAssignedStatus() {\n        let otherPeer = self.second!\n\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        _ = swim.addMember(otherPeer, status: .alive(incarnation: 0))\n        XCTAssertEqual(swim.member(for: otherPeer)!.status, .alive(incarnation: 0))\n\n        _ = swim.mark(otherPeer, as: .suspect(incarnation: 99, suspectedBy: [self.thirdNode]))\n        XCTAssertEqual(swim.member(for: otherPeer)!.status, .suspect(incarnation: 99, suspectedBy: [self.thirdNode]))\n    }\n\n    func test_member_shouldWorkForMyself() {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: .init(), myself: self.myself)\n\n        _ = swim.addMember(self.second, status: .alive(incarnation: 10))\n\n        let member = swim.member\n        XCTAssertEqual(member.node, self.myself.node)\n        XCTAssertTrue(member.isAlive)\n        XCTAssertEqual(member.status, .alive(incarnation: 0))\n    }\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: (Round up the usual...) 
Suspects\n\n    func test_suspects_shouldContainOnlySuspectedNodes() {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        let aliveAtZero = SWIM.Status.alive(incarnation: 0)\n        _ = swim.addMember(self.second, status: aliveAtZero)\n        _ = swim.addMember(self.third, status: aliveAtZero)\n        _ = swim.addMember(self.fourth, status: aliveAtZero)\n        XCTAssertEqual(swim.notDeadMemberCount, 4)  // three new nodes + myself\n\n        self.validateSuspects(swim, expected: [])\n\n        let directive: SWIM.Instance.MarkedDirective = swim.mark(\n            self.second,\n            as: .suspect(incarnation: 0, suspectedBy: [self.third.node])\n        )\n        switch directive {\n        case .applied(let previousStatus, let member):\n            XCTAssertEqual(\n                previousStatus,\n                aliveAtZero\n            )\n            XCTAssertEqual(\n                member.status,\n                .suspect(incarnation: 0, suspectedBy: [self.third.node])\n            )\n        default:\n            XCTFail(\"Expected .applied, got: \\(directive)\")\n        }\n        self.validateSuspects(swim, expected: [self.second.node])\n\n        _ = swim.mark(self.third, as: .suspect(incarnation: 0, suspectedBy: [self.thirdNode]))\n        self.validateSuspects(swim, expected: [self.second.node, self.third.node])\n\n        _ = swim.mark(self.second, as: .suspect(incarnation: 0, suspectedBy: [self.thirdNode]))\n        _ = swim.mark(self.myself, as: .alive(incarnation: 1))\n        self.validateSuspects(swim, expected: [self.second.node, self.third.node])\n    }\n\n    func test_suspects_shouldMark_whenBiggerSuspicionList() {\n        var settings: SWIM.Settings = .init()\n        settings.lifeguard.maxIndependentSuspicions = 10\n\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: settings, myself: self.myself)\n\n        let aliveAtZero = 
SWIM.Status.alive(incarnation: 0)\n        _ = swim.addMember(self.second, status: aliveAtZero)\n        XCTAssertEqual(swim.notDeadMemberCount, 2)\n\n        self.validateSuspects(swim, expected: [])\n        let oldStatus: SWIM.Status = .suspect(incarnation: 0, suspectedBy: [self.thirdNode])\n        let d1 = swim.mark(self.second, as: oldStatus)\n        switch d1 {\n        case .applied(let previousStatus, let member):\n            XCTAssertEqual(previousStatus, aliveAtZero)\n            XCTAssertEqual(member.status, oldStatus)\n        default:\n            XCTFail(\"Expected .applied, but got: \\(d1)\")\n            return\n        }\n        self.validateSuspects(swim, expected: [self.second.node])\n        let newStatus: SWIM.Status = .suspect(incarnation: 0, suspectedBy: [self.thirdNode, self.secondNode])\n        let d2 = swim.mark(self.second, as: newStatus)\n        switch d2 {\n        case .applied(let previousStatus, let member):\n            XCTAssertEqual(previousStatus, oldStatus)\n            XCTAssertEqual(member.status, newStatus)\n        default:\n            XCTFail(\"Expected .applied, but got: \\(d2)\")\n            return\n        }\n        self.validateSuspects(swim, expected: [self.second.node])\n    }\n\n    func test_suspects_shouldNotMark_whenSmallerSuspicionList() {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        let aliveAtZero = SWIM.Status.alive(incarnation: 0)\n        _ = swim.addMember(self.second, status: aliveAtZero)\n        XCTAssertEqual(swim.notDeadMemberCount, 2)\n\n        self.validateSuspects(swim, expected: [])\n        let oldStatus: SWIM.Status = .suspect(incarnation: 0, suspectedBy: [self.thirdNode, self.secondNode])\n\n        let d1 = swim.mark(self.second, as: oldStatus)\n        switch d1 {\n        case .applied(let previousStatus, let member):\n            XCTAssertEqual(previousStatus, aliveAtZero)\n            
XCTAssertEqual(member.status, oldStatus)\n        default:\n            XCTFail(\"Expected .applied, but got: \\(d1)\")\n            return\n        }\n        self.validateSuspects(swim, expected: [self.second.node])\n        let newStatus: SWIM.Status = .suspect(incarnation: 0, suspectedBy: [self.thirdNode])\n\n        XCTAssertEqual(swim.mark(self.second, as: newStatus), .ignoredDueToOlderStatus(currentStatus: oldStatus))\n        let d2 = swim.mark(self.second, as: newStatus)\n        switch d2 {\n        case .ignoredDueToOlderStatus(currentStatus: oldStatus):\n            ()  // ok\n        default:\n            XCTFail(\"Expected .ignoredDueToOlderStatus, but got: \\(d2)\")\n            return\n        }\n        self.validateSuspects(swim, expected: [self.second.node])\n    }\n\n    func test_memberCount_shouldNotCountDeadMembers() {\n        let settings = SWIM.Settings()\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: settings, myself: self.myself)\n\n        let aliveAtZero = SWIM.Status.alive(incarnation: 0)\n        _ = swim.addMember(self.second, status: aliveAtZero)\n        _ = swim.addMember(self.third, status: aliveAtZero)\n        _ = swim.addMember(self.fourth, status: aliveAtZero)\n        XCTAssertEqual(swim.notDeadMemberCount, 4)\n\n        _ = swim.mark(self.second, as: .dead)\n        XCTAssertEqual(swim.notDeadMemberCount, 3)\n\n        _ = swim.mark(self.fourth, as: .dead)\n        XCTAssertEqual(swim.notDeadMemberCount, 2)  // dead is not part of membership\n    }\n\n    func test_memberCount_shouldCountUnreachableMembers() {\n        var settings = SWIM.Settings()\n        settings.unreachability = .enabled\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: settings, myself: self.myself)\n\n        let aliveAtZero = SWIM.Status.alive(incarnation: 0)\n        _ = swim.addMember(self.second, status: aliveAtZero)\n        _ = swim.addMember(self.third, status: aliveAtZero)\n        _ = 
swim.addMember(self.fourth, status: aliveAtZero)\n        XCTAssertEqual(swim.notDeadMemberCount, 4)\n\n        _ = swim.mark(self.second, as: .dead)\n        XCTAssertEqual(swim.notDeadMemberCount, 3)\n\n        _ = swim.mark(self.third, as: .unreachable(incarnation: 19))\n        XCTAssertEqual(swim.notDeadMemberCount, 3)  // unreachable is still \"part of the membership\" as far as we are concerned\n\n        _ = swim.mark(self.fourth, as: .dead)\n        XCTAssertEqual(swim.notDeadMemberCount, 2)  // dead is not part of membership\n    }\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: makeGossipPayload\n\n    func test_makeGossipPayload_shouldGossipAboutSelf_whenNoMembers() throws {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n\n        try self.validateGossip(\n            swim: &swim,\n            expected: [.init(peer: self.myself, status: .alive(incarnation: 0), protocolPeriod: 0)]\n        )\n    }\n\n    func test_makeGossipPayload_shouldEventuallyStopGossips() throws {\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: SWIM.Settings(), myself: self.myself)\n        _ = swim.addMember(self.second, status: .alive(incarnation: 0))\n        _ = swim.addMember(self.third, status: .alive(incarnation: 0))\n\n        var count = 0\n        var gossip = swim.makeGossipPayload(to: nil)\n        while case .membership(let members) = gossip, members.count > 1 {\n            gossip = swim.makeGossipPayload(to: nil)\n            count += 1\n        }\n\n        XCTAssertEqual(count, 7)  // based on the default values of the\n    }\n\n    func test_makeGossipPayload_shouldReset_whenNewMemberChangedStatus() throws {\n        let settings: SWIM.Settings = .init()\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: settings, myself: self.myself)\n\n        _ = 
swim.addMember(self.second, status: .alive(incarnation: 0))\n        _ = swim.addMember(self.third, status: .alive(incarnation: 0))\n        let myselfMember = SWIM.Member(peer: self.myself, status: .alive(incarnation: 0), protocolPeriod: 0)\n        let thirdMember = SWIM.Member(peer: self.third, status: .alive(incarnation: 0), protocolPeriod: 0)\n\n        try self.validateGossip(\n            swim: &swim,\n            expected: [\n                .init(peer: self.second, status: .alive(incarnation: 0), protocolPeriod: 0), myselfMember, thirdMember,\n            ]\n        )\n\n        _ = swim.mark(self.second, as: .suspect(incarnation: 0, suspectedBy: [self.thirdNode]))\n        try self.validateGossip(\n            swim: &swim,\n            expected: [\n                .init(\n                    peer: self.second,\n                    status: .suspect(incarnation: 0, suspectedBy: [self.thirdNode]),\n                    protocolPeriod: 0\n                ),\n                myselfMember,\n                thirdMember,\n            ]\n        )\n        try self.validateGossip(\n            swim: &swim,\n            expected: [\n                .init(\n                    peer: self.second,\n                    status: .suspect(incarnation: 0, suspectedBy: [self.thirdNode]),\n                    protocolPeriod: 0\n                ),\n                myselfMember,\n                thirdMember,\n            ]\n        )\n\n        // turns out it is alive after all, and it bumped its incarnation (it had to, to refute the suspicion)\n        _ = swim.mark(self.second, as: .alive(incarnation: 1))\n\n        try self.validateGossip(\n            swim: &swim,\n            expected: [\n                .init(peer: self.second, status: .alive(incarnation: 1), protocolPeriod: 0),\n                .init(peer: self.third, status: .alive(incarnation: 0), protocolPeriod: 0),\n                myselfMember,\n            ]\n        )\n    }\n\n    func 
test_makeGossipPayload_shouldReset_whenNewMembersJoin() throws {\n        let settings: SWIM.Settings = .init()\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: settings, myself: self.myself)\n\n        _ = swim.addMember(self.second, status: .alive(incarnation: 0))\n        let myselfMember = SWIM.Member(peer: self.myself, status: .alive(incarnation: 0), protocolPeriod: 0)\n\n        try self.validateGossip(\n            swim: &swim,\n            expected: [.init(peer: self.second, status: .alive(incarnation: 0), protocolPeriod: 0), myselfMember]\n        )\n\n        _ = swim.mark(self.second, as: .suspect(incarnation: 0, suspectedBy: [self.thirdNode]))\n        try self.validateGossip(\n            swim: &swim,\n            expected: [\n                .init(\n                    peer: self.second,\n                    status: .suspect(incarnation: 0, suspectedBy: [self.thirdNode]),\n                    protocolPeriod: 0\n                ), myselfMember,\n            ]\n        )\n        try self.validateGossip(\n            swim: &swim,\n            expected: [\n                .init(\n                    peer: self.second,\n                    status: .suspect(incarnation: 0, suspectedBy: [self.thirdNode]),\n                    protocolPeriod: 0\n                ), myselfMember,\n            ]\n        )\n        try self.validateGossip(\n            swim: &swim,\n            expected: [\n                .init(\n                    peer: self.second,\n                    status: .suspect(incarnation: 0, suspectedBy: [self.thirdNode]),\n                    protocolPeriod: 0\n                ), myselfMember,\n            ]\n        )\n        try self.validateGossip(\n            swim: &swim,\n            expected: [\n                .init(\n                    peer: self.second,\n                    status: .suspect(incarnation: 0, suspectedBy: [self.thirdNode]),\n                    protocolPeriod: 0\n                ), 
myselfMember,\n            ]\n        )\n\n        // a new member joins, and we must ensure it'd get some of the gossip\n        _ = swim.addMember(self.third, status: .alive(incarnation: 0))\n\n        try self.validateGossip(\n            swim: &swim,\n            expected: [\n                .init(\n                    peer: self.second,\n                    status: .suspect(incarnation: 0, suspectedBy: [self.thirdNode]),\n                    protocolPeriod: 0\n                ),\n                .init(peer: self.third, status: .alive(incarnation: 0), protocolPeriod: 0),\n                myselfMember,\n            ]\n        )\n    }\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: Confirming dead\n\n    func test_confirmDead_anUnknownNode_shouldDoNothing() throws {\n        var settings = SWIM.Settings()\n        settings.unreachability = .enabled\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: settings, myself: self.myself)\n\n        let directive = swim.confirmDead(peer: self.second)\n        switch directive {\n        case .ignored:\n            ()  // ok\n        default:\n            XCTFail(\"Expected marking an unknown node to be ignored, got: \\(directive)\")\n        }\n    }\n\n    func test_confirmDead_aKnownOtherNode_shouldApply() throws {\n        var settings = SWIM.Settings()\n        settings.unreachability = .enabled\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: settings, myself: self.myself)\n\n        _ = swim.addMember(self.second, status: .alive(incarnation: 10))\n\n        let directive = swim.confirmDead(peer: self.second)\n        switch directive {\n        case .applied(let change):\n            let previousStatus = change.previousStatus\n            let member = change.member\n            XCTAssertEqual(previousStatus, SWIM.Status.alive(incarnation: 10))\n            
XCTAssertEqual(\"\\(reflecting: member.peer)\", \"\\(reflecting: self.second!)\")\n        default:\n            XCTFail(\"Expected confirmingDead a node to be `.applied`, got: \\(directive)\")\n        }\n    }\n\n    func test_confirmDead_myself_shouldApply() throws {\n        var settings = SWIM.Settings()\n        settings.unreachability = .enabled\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: settings, myself: self.myself)\n\n        _ = swim.addMember(self.second, status: .alive(incarnation: 10))\n\n        let directive = swim.confirmDead(peer: self.myself)\n        switch directive {\n        case .applied(let change):\n            let previousStatus = change.previousStatus\n            let member = change.member\n            XCTAssertEqual(previousStatus, SWIM.Status.alive(incarnation: 0))\n            XCTAssertEqual(\"\\(reflecting: member.peer)\", \"\\(reflecting: self.myself!)\")\n        default:\n            XCTFail(\"Expected confirmingDead a node to be `.applied`, got: \\(directive)\")\n        }\n    }\n\n    func test_confirmDead_shouldRemovePeerFromMembersToPing() throws {\n        var settings = SWIM.Settings()\n        settings.unreachability = .enabled\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: settings, myself: self.myself)\n\n        _ = swim.addMember(self.second, status: .alive(incarnation: 10))\n        _ = swim.addMember(self.third, status: .alive(incarnation: 10))\n\n        let secondMember = swim.member(forNode: self.secondNode)!\n\n        _ = swim.confirmDead(peer: self.second)\n        XCTAssertFalse(swim.membersToPing.contains(secondMember))\n\n        XCTAssertNotEqual(swim.nextPeerToPing()?.node, self.second.node)\n        XCTAssertNotEqual(swim.nextPeerToPing()?.node, self.second.node)\n        XCTAssertNotEqual(swim.nextPeerToPing()?.node, self.second.node)\n        XCTAssertNotEqual(swim.nextPeerToPing()?.node, self.second.node)\n        
XCTAssertNotEqual(swim.nextPeerToPing()?.node, self.second.node)\n    }\n\n    func test_confirmDead_shouldStoreATombstone_disallowAddingAgain() throws {\n        var settings = SWIM.Settings()\n        settings.unreachability = .enabled\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: settings, myself: self.myself)\n\n        _ = swim.addMember(self.second, status: .alive(incarnation: 10))\n        _ = swim.addMember(self.third, status: .alive(incarnation: 10))\n\n        let secondMember = swim.member(forNode: self.secondNode)!\n\n        _ = swim.confirmDead(peer: self.second)\n        XCTAssertFalse(swim.members.contains(secondMember))\n        XCTAssertFalse(swim.membersToPing.contains(secondMember))\n\n        // \"you are already dead\"\n        let directives = swim.addMember(self.second, status: .alive(incarnation: 100))\n\n        // no mercy for zombies; don't add it again\n        XCTAssertTrue(directives.count == 1)\n        switch directives.first {\n        case .memberAlreadyKnownDead(let dead):\n            XCTAssertEqual(dead.status, SWIM.Status.dead)\n            XCTAssertEqual(dead.node, self.secondNode)\n        default:\n            XCTFail(\"Expected `.memberAlreadyKnownDead`, got: \\(String(describing: directives.first))\")\n        }\n        XCTAssertFalse(swim.members.contains(secondMember))\n        XCTAssertFalse(swim.membersToPing.contains(secondMember))\n    }\n\n    func test_confirmDead_tombstone_shouldExpireAfterConfiguredAmountOfTicks() throws {\n        var settings = SWIM.Settings()\n        settings.tombstoneCleanupIntervalInTicks = 3\n        settings.tombstoneTimeToLiveInTicks = 2\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: settings, myself: self.myself)\n\n        _ = swim.addMember(self.second, status: .alive(incarnation: 10))\n        _ = swim.addMember(self.third, status: .alive(incarnation: 10))\n\n        let secondMember = swim.member(forNode: self.secondNode)!\n\n        _ = swim.confirmDead(peer: self.second)\n        
XCTAssertFalse(swim.membersToPing.contains(secondMember))\n\n        XCTAssertTrue(\n            swim.removedDeadMemberTombstones\n                .contains(.init(uid: self.secondNode.uid!, deadlineProtocolPeriod: 0 /* not part of equality*/))\n        )\n\n        _ = swim.onPeriodicPingTick()\n        _ = swim.onPeriodicPingTick()\n\n        XCTAssertTrue(\n            swim.removedDeadMemberTombstones\n                .contains(.init(uid: self.secondNode.uid!, deadlineProtocolPeriod: 0 /* not part of equality*/))\n        )\n\n        _ = swim.onPeriodicPingTick()\n        _ = swim.onPeriodicPingTick()\n\n        XCTAssertFalse(\n            swim.removedDeadMemberTombstones\n                .contains(.init(uid: self.secondNode.uid!, deadlineProtocolPeriod: 0 /* not part of equality*/))\n        )\n\n        // past the deadline and tombstone expiration, we'd be able to smuggle in that node again...!\n        _ = swim.addMember(self.second, status: .alive(incarnation: 135_342))\n        let member = swim.member(for: self.second)\n        XCTAssertEqual(member?.node, self.secondNode)\n    }\n\n    // ==== ----------------------------------------------------------------------------------------------------------------\n    // MARK: Checks\n\n    /// This test is weird and should \"never\" fail, but it did, on some toolchains.\n    /// This test is to remain here as a check if timeouts or something else would suddenly return unexpected values.\n    func test_log_becauseWeSawItReturnWronglyOnSomeToolchains() {\n        XCTAssertEqual(log2(4.0), 2)\n    }\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: utility functions\n\n    func validateMark(\n        swim: inout SWIM.Instance<TestPeer, TestPeer, TestPeer>,\n        member: SWIM.Member<TestPeer>,\n        status: SWIM.Status,\n        shouldSucceed: Bool,\n        file: StaticString = (#file),\n        line: UInt = #line\n    ) 
throws {\n        try self.validateMark(\n            swim: &swim,\n            peer: member.peer,\n            status: status,\n            shouldSucceed: shouldSucceed,\n            file: file,\n            line: line\n        )\n    }\n\n    func validateMark(\n        swim: inout SWIM.Instance<TestPeer, TestPeer, TestPeer>,\n        peer: TestPeer,\n        status: SWIM.Status,\n        shouldSucceed: Bool,\n        file: StaticString = (#file),\n        line: UInt = #line\n    ) throws {\n        let markResult = swim.mark(peer, as: status)\n\n        if shouldSucceed {\n            guard case .applied = markResult else {\n                XCTFail(\"Expected `.applied`, got `\\(markResult)`\", file: file, line: line)\n                return\n            }\n        } else {\n            guard case .ignoredDueToOlderStatus = markResult else {\n                XCTFail(\"Expected `.ignoredDueToOlderStatus`, got `\\(markResult)`\", file: file, line: line)\n                return\n            }\n        }\n    }\n\n    func validateSuspects(\n        _ swim: SWIM.Instance<TestPeer, TestPeer, TestPeer>,\n        expected: Set<Node>,\n        file: StaticString = (#file),\n        line: UInt = #line\n    ) {\n        XCTAssertEqual(\n            Set(\n                swim.suspects.map {\n                    $0.node\n                }\n            ),\n            expected,\n            file: file,\n            line: line\n        )\n    }\n\n    func validateGossip(\n        swim: inout SWIM.Instance<TestPeer, TestPeer, TestPeer>,\n        expected: Set<SWIM.Member<TestPeer>>,\n        file: StaticString = (#file),\n        line: UInt = #line\n    ) throws {\n        let payload = swim.makeGossipPayload(to: nil)\n        if expected.isEmpty {\n            guard case SWIM.GossipPayload.none = payload else {\n                XCTFail(\"Expected `.none`, but got `\\(payload)`\", file: file, line: line)\n                return\n            }\n        } else {\n            
guard case SWIM.GossipPayload.membership(let members) = payload else {\n                XCTFail(\"Expected `.membership`, but got `\\(payload)`\", file: file, line: line)\n                return\n            }\n\n            XCTAssertEqual(Set(members), expected, file: file, line: line)\n        }\n    }\n}\n"
  },
  {
    "path": "Tests/SWIMTests/SWIMMetricsTests.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2020 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport ClusterMembership\nimport Dispatch\nimport Metrics\nimport SWIMTestKit\nimport XCTest\n\n@testable import CoreMetrics\n@testable import SWIM\n\nfinal class SWIMMetricsTests: XCTestCase {\n    let myselfNode = ClusterMembership.Node(protocol: \"test\", host: \"127.0.0.1\", port: 7001, uid: 1111)\n    let secondNode = ClusterMembership.Node(protocol: \"test\", host: \"127.0.0.1\", port: 7002, uid: 2222)\n    let thirdNode = ClusterMembership.Node(protocol: \"test\", host: \"127.0.0.1\", port: 7003, uid: 3333)\n    let fourthNode = ClusterMembership.Node(protocol: \"test\", host: \"127.0.0.1\", port: 7004, uid: 4444)\n    let fifthNode = ClusterMembership.Node(protocol: \"test\", host: \"127.0.0.1\", port: 7005, uid: 5555)\n\n    var myself: TestPeer!\n    var second: TestPeer!\n    var third: TestPeer!\n    var fourth: TestPeer!\n    var fifth: TestPeer!\n\n    var testMetrics: TestMetrics!\n\n    override func setUp() {\n        super.setUp()\n        self.myself = TestPeer(node: self.myselfNode)\n        self.second = TestPeer(node: self.secondNode)\n        self.third = TestPeer(node: self.thirdNode)\n        self.fourth = TestPeer(node: self.fourthNode)\n        self.fifth = TestPeer(node: self.fifthNode)\n\n        self.testMetrics = TestMetrics()\n        MetricsSystem.bootstrapInternal(self.testMetrics)\n    }\n\n    override func tearDown() {\n        super.tearDown()\n        self.myself = 
nil\n        self.second = nil\n        self.third = nil\n        self.fourth = nil\n        self.fifth = nil\n\n        MetricsSystem.bootstrapInternal(NOOPMetricsHandler.instance)\n    }\n\n    // ==== ------------------------------------------------------------------------------------------------------------\n    // MARK: Metrics tests\n\n    let alive = [(\"status\", \"alive\")]\n    let unreachable = [(\"status\", \"unreachable\")]\n    let dead = [(\"status\", \"dead\")]\n\n    func test_members_becoming_suspect() {\n        var settings = SWIM.Settings()\n        settings.unreachability = .enabled\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: settings, myself: self.myself)\n\n        self.expectMembership(swim, alive: 1, unreachable: 0, totalDead: 0)\n\n        _ = swim.addMember(self.second, status: .alive(incarnation: 0))\n        self.expectMembership(swim, alive: 2, unreachable: 0, totalDead: 0)\n\n        _ = swim.addMember(self.third, status: .alive(incarnation: 0))\n        self.expectMembership(swim, alive: 3, unreachable: 0, totalDead: 0)\n\n        _ = swim.addMember(self.fourth, status: .alive(incarnation: 0))\n        _ = swim.onPeriodicPingTick()\n        self.expectMembership(swim, alive: 4, unreachable: 0, totalDead: 0)\n\n        for _ in 0..<10 {\n            _ = swim.onPingResponse(\n                response: .timeout(\n                    target: self.second,\n                    pingRequestOrigin: nil,\n                    timeout: .seconds(1),\n                    sequenceNumber: 0\n                ),\n                pingRequestOrigin: nil,\n                pingRequestSequenceNumber: nil\n            )\n            _ = swim.onPingRequestResponse(.nack(target: self.third, sequenceNumber: 0), pinged: self.second)\n        }\n        expectMembership(swim, suspect: 1)\n\n        for _ in 0..<10 {\n            _ = swim.onPingResponse(\n                response: .timeout(target: self.third, pingRequestOrigin: nil, 
timeout: .seconds(1), sequenceNumber: 0),\n                pingRequestOrigin: nil,\n                pingRequestSequenceNumber: nil\n            )\n        }\n        expectMembership(swim, suspect: 2)\n    }\n\n    enum DowningMode {\n        case unreachableFirst\n        case deadImmediately\n    }\n\n    func test_members_becoming_dead() {\n        self.shared_members(mode: .deadImmediately)\n    }\n\n    func test_members_becoming_unreachable() {\n        self.shared_members(mode: .unreachableFirst)\n    }\n\n    func shared_members(mode: DowningMode) {\n        var settings = SWIM.Settings()\n        switch mode {\n        case .unreachableFirst:\n            settings.unreachability = .enabled\n        case .deadImmediately:\n            settings.unreachability = .disabled\n        }\n        var mockTime = DispatchTime.now()\n        settings.timeSourceNow = { mockTime }\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: settings, myself: self.myself)\n\n        self.expectMembership(swim, alive: 1, unreachable: 0, totalDead: 0)\n\n        _ = swim.addMember(self.second, status: .alive(incarnation: 0))\n        self.expectMembership(swim, alive: 2, unreachable: 0, totalDead: 0)\n\n        _ = swim.addMember(self.third, status: .alive(incarnation: 0))\n        self.expectMembership(swim, alive: 3, unreachable: 0, totalDead: 0)\n\n        _ = swim.addMember(self.fourth, status: .alive(incarnation: 0))\n        _ = swim.onPeriodicPingTick()\n        self.expectMembership(swim, alive: 4, unreachable: 0, totalDead: 0)\n\n        let totalMembers = 4\n\n        for _ in 0..<10 {\n            _ = swim.onPingResponse(\n                response: .timeout(\n                    target: self.second,\n                    pingRequestOrigin: nil,\n                    timeout: .seconds(1),\n                    sequenceNumber: 0\n                ),\n                pingRequestOrigin: nil,\n                pingRequestSequenceNumber: nil\n            )\n  
          mockTime = mockTime + DispatchTimeInterval.seconds(120)\n            _ = swim.onPeriodicPingTick()\n        }\n        let (expectedUnreachables1, expectedDeads1): (Int, Int)\n        switch mode {\n        case .unreachableFirst: (expectedUnreachables1, expectedDeads1) = (1, 0)\n        case .deadImmediately: (expectedUnreachables1, expectedDeads1) = (0, 1)\n        }\n        self.expectMembership(\n            swim,\n            alive: totalMembers - expectedDeads1 - expectedUnreachables1,\n            unreachable: expectedUnreachables1,\n            totalDead: expectedDeads1\n        )\n\n        for _ in 0..<10 {\n            _ = swim.onPingResponse(\n                response: .timeout(target: self.third, pingRequestOrigin: nil, timeout: .seconds(1), sequenceNumber: 0),\n                pingRequestOrigin: nil,\n                pingRequestSequenceNumber: nil\n            )\n            mockTime = mockTime + DispatchTimeInterval.seconds(120)\n            _ = swim.onPeriodicPingTick()\n        }\n        let (expectedUnreachables2, expectedDeads2): (Int, Int)\n        switch mode {\n        case .unreachableFirst: (expectedUnreachables2, expectedDeads2) = (2, 0)\n        case .deadImmediately: (expectedUnreachables2, expectedDeads2) = (0, 2)\n        }\n        self.expectMembership(\n            swim,\n            alive: totalMembers - expectedDeads2 - expectedUnreachables2,\n            unreachable: expectedUnreachables2,\n            totalDead: expectedDeads2\n        )\n\n        if mode == .unreachableFirst {\n            _ = swim.confirmDead(peer: self.second)\n            self.expectMembership(\n                swim,\n                alive: totalMembers - expectedDeads2 - expectedUnreachables2,\n                unreachable: expectedUnreachables2 - 1,\n                totalDead: expectedDeads2 + 1\n            )\n\n            let gotRemovedDeadTombstones = try! 
self.testMetrics.expectRecorder(\n                swim.metrics.removedDeadMemberTombstones\n            ).lastValue!\n            XCTAssertEqual(gotRemovedDeadTombstones, Double(expectedDeads2 + 1))\n        }\n    }\n\n    func test_lha_adjustment() {\n        let settings = SWIM.Settings()\n        var swim = SWIM.Instance<TestPeer, TestPeer, TestPeer>(settings: settings, myself: self.myself)\n\n        _ = swim.addMember(self.second, status: .alive(incarnation: 0))\n        _ = swim.addMember(self.third, status: .alive(incarnation: 0))\n\n        XCTAssertEqual(try! self.testMetrics.expectRecorder(swim.metrics.localHealthMultiplier).lastValue, Double(0))\n\n        swim.adjustLHMultiplier(.failedProbe)\n        XCTAssertEqual(try! self.testMetrics.expectRecorder(swim.metrics.localHealthMultiplier).lastValue, Double(1))\n\n        swim.adjustLHMultiplier(.failedProbe)\n        XCTAssertEqual(try! self.testMetrics.expectRecorder(swim.metrics.localHealthMultiplier).lastValue, Double(2))\n\n        swim.adjustLHMultiplier(.successfulProbe)\n        XCTAssertEqual(try! self.testMetrics.expectRecorder(swim.metrics.localHealthMultiplier).lastValue, Double(1))\n    }\n}\n\n// ==== ----------------------------------------------------------------------------------------------------------------\n// MARK: Assertions\n\nextension SWIMMetricsTests {\n    private func expectMembership(\n        _ swim: SWIM.Instance<TestPeer, TestPeer, TestPeer>,\n        suspect: Int,\n        file: StaticString = #file,\n        line: UInt = #line\n    ) {\n        let m: SWIM.Metrics = swim.metrics\n\n        let gotSuspect: Double? = try! 
self.testMetrics.expectRecorder(m.membersSuspect).lastValue\n        XCTAssertEqual(\n            gotSuspect,\n            Double(suspect),\n            \"\"\"\n            Expected \\(suspect) [suspect] members, was: \\(String(reflecting: gotSuspect)); Members:\n            \\(swim.members.map(\\.description).joined(separator: \"\\n\"))\n            \"\"\",\n            file: file,\n            line: line\n        )\n    }\n\n    private func expectMembership(\n        _ swim: SWIM.Instance<TestPeer, TestPeer, TestPeer>,\n        alive: Int,\n        unreachable: Int,\n        totalDead: Int,\n        file: StaticString = #file,\n        line: UInt = #line\n    ) {\n        let m: SWIM.Metrics = swim.metrics\n\n        let gotAlive: Double? = try! self.testMetrics.expectRecorder(m.membersAlive).lastValue\n        XCTAssertEqual(\n            gotAlive,\n            Double(alive),\n            \"\"\"\n            Expected \\(alive) [alive] members, was: \\(String(reflecting: gotAlive)); Members:\n            \\(swim.members.map(\\.description).joined(separator: \"\\n\"))\n            \"\"\",\n            file: file,\n            line: line\n        )\n\n        let gotUnreachable: Double? = try! self.testMetrics.expectRecorder(m.membersUnreachable).lastValue\n        XCTAssertEqual(\n            gotUnreachable,\n            Double(unreachable),\n            \"\"\"\n            Expected \\(unreachable) [unreachable] members, was: \\(String(reflecting: gotUnreachable)); Members:\n            \\(swim.members.map(\\.description).joined(separator: \"\\n\"))\n            \"\"\",\n            file: file,\n            line: line\n        )\n\n        let gotTotalDead: Int64? = try! 
self.testMetrics.expectCounter(m.membersTotalDead).totalValue\n        XCTAssertEqual(\n            gotTotalDead,\n            Int64(totalDead),\n            \"\"\"\n            Expected \\(totalDead) [dead] members, was: \\(String(reflecting: gotTotalDead)); Members:\n            \\(swim.members.map(\\.description).joined(separator: \"\\n\"))\n            \"\"\",\n            file: file,\n            line: line\n        )\n    }\n}\n"
  },
  {
    "path": "Tests/SWIMTests/SWIMSettingsTests.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2018-2019 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport ClusterMembership\nimport XCTest\n\n@testable import SWIM\n\nfinal class SWIMSettingsTests: XCTestCase {\n    func test_gossipedEnoughTimes() {\n        let settings = SWIM.Settings()\n\n        let node = ClusterMembership.Node(protocol: \"test\", host: \"127.0.0.1\", port: 7001, uid: 1111)\n        let member = SWIM.Member(peer: TestPeer(node: node), status: .alive(incarnation: 0), protocolPeriod: 0)\n        var g = SWIM.Gossip(member: member, numberOfTimesGossiped: 0)\n\n        var members = 0\n\n        // just 1 member, means no other peers thus we dont have to gossip ever\n        members = 1\n        g.numberOfTimesGossiped = 0\n        XCTAssertEqual(settings.gossip.gossipedEnoughTimes(g, members: members), false)\n        g.numberOfTimesGossiped = 1\n        XCTAssertEqual(settings.gossip.gossipedEnoughTimes(g, members: members), false)\n\n        members = 2\n        g.numberOfTimesGossiped = 0\n        for _ in 0...3 {\n            XCTAssertEqual(settings.gossip.gossipedEnoughTimes(g, members: members), false)\n            g.numberOfTimesGossiped += 1\n        }\n\n        members = 10\n        g.numberOfTimesGossiped = 0\n        for _ in 0...9 {\n            XCTAssertEqual(settings.gossip.gossipedEnoughTimes(g, members: members), false)\n            g.numberOfTimesGossiped += 1\n        }\n\n        members = 50\n        g.numberOfTimesGossiped = 0\n        for _ in 0...16 {\n   
         XCTAssertEqual(settings.gossip.gossipedEnoughTimes(g, members: members), false)\n            g.numberOfTimesGossiped += 1\n        }\n\n        members = 200\n        g.numberOfTimesGossiped = 0\n        for _ in 0...21 {\n            XCTAssertEqual(settings.gossip.gossipedEnoughTimes(g, members: members), false)\n            g.numberOfTimesGossiped += 1\n        }\n    }\n}\n"
  },
  {
    "path": "Tests/SWIMTests/TestPeer.swift",
    "content": "//===----------------------------------------------------------------------===//\n//\n// This source file is part of the Swift Cluster Membership open source project\n//\n// Copyright (c) 2018-2022 Apple Inc. and the Swift Cluster Membership project authors\n// Licensed under Apache License v2.0\n//\n// See LICENSE.txt for license information\n// See CONTRIBUTORS.txt for the list of Swift Cluster Membership project authors\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n//===----------------------------------------------------------------------===//\n\nimport ClusterMembership\nimport Dispatch\nimport XCTest\n\n@testable import SWIM\n\nfinal class TestPeer: Hashable, SWIMPeer, SWIMPingOriginPeer, SWIMPingRequestOriginPeer, CustomStringConvertible {\n    var swimNode: Node\n\n    let semaphore = DispatchSemaphore(value: 1)\n    var messages: [TestPeer.Message] = []\n\n    enum Message {\n        case ping(\n            payload: SWIM.GossipPayload<TestPeer>,\n            origin: TestPeer,\n            timeout: Duration,\n            sequenceNumber: SWIM.SequenceNumber,\n            continuation: CheckedContinuation<SWIM.PingResponse<TestPeer, TestPeer>, Error>\n        )\n        case pingReq(\n            target: TestPeer,\n            payload: SWIM.GossipPayload<TestPeer>,\n            origin: TestPeer,\n            timeout: Duration,\n            sequenceNumber: SWIM.SequenceNumber,\n            continuation: CheckedContinuation<SWIM.PingResponse<TestPeer, TestPeer>, Error>\n        )\n        case ack(\n            target: TestPeer,\n            incarnation: SWIM.Incarnation,\n            payload: SWIM.GossipPayload<TestPeer>,\n            sequenceNumber: SWIM.SequenceNumber\n        )\n        case nack(\n            target: TestPeer,\n            sequenceNumber: SWIM.SequenceNumber\n        )\n    }\n\n    init(node: Node) {\n        self.swimNode = node\n    }\n\n    func ping(\n        payload: SWIM.GossipPayload<TestPeer>,\n        from 
pingOrigin: TestPeer,\n        timeout: Duration,\n        sequenceNumber: SWIM.SequenceNumber\n    ) async throws -> SWIM.PingResponse<TestPeer, TestPeer> {\n        self.semaphore.wait()\n        defer { self.semaphore.signal() }\n\n        return try await withCheckedThrowingContinuation { continuation in\n            self.messages.append(\n                .ping(\n                    payload: payload,\n                    origin: pingOrigin,\n                    timeout: timeout,\n                    sequenceNumber: sequenceNumber,\n                    continuation: continuation\n                )\n            )\n        }\n    }\n\n    func pingRequest(\n        target: TestPeer,\n        payload: SWIM.GossipPayload<TestPeer>,\n        from origin: TestPeer,\n        timeout: Duration,\n        sequenceNumber: SWIM.SequenceNumber\n    ) async throws -> SWIM.PingResponse<TestPeer, TestPeer> {\n        self.semaphore.wait()\n        defer { self.semaphore.signal() }\n\n        return try await withCheckedThrowingContinuation { continuation in\n            self.messages.append(\n                .pingReq(\n                    target: target,\n                    payload: payload,\n                    origin: origin,\n                    timeout: timeout,\n                    sequenceNumber: sequenceNumber,\n                    continuation: continuation\n                )\n            )\n        }\n    }\n\n    func ack(\n        acknowledging sequenceNumber: SWIM.SequenceNumber,\n        target: TestPeer,\n        incarnation: SWIM.Incarnation,\n        payload: SWIM.GossipPayload<TestPeer>\n    ) {\n        self.semaphore.wait()\n        defer { self.semaphore.signal() }\n\n        self.messages.append(\n            .ack(target: target, incarnation: incarnation, payload: payload, sequenceNumber: sequenceNumber)\n        )\n    }\n\n    func nack(\n        acknowledging sequenceNumber: SWIM.SequenceNumber,\n        target: TestPeer\n    ) {\n        
self.semaphore.wait()\n        defer { self.semaphore.signal() }\n\n        self.messages.append(.nack(target: target, sequenceNumber: sequenceNumber))\n    }\n\n    func hash(into hasher: inout Hasher) {\n        hasher.combine(self.node)\n    }\n\n    static func == (lhs: TestPeer, rhs: TestPeer) -> Bool {\n        if lhs === rhs {\n            return true\n        }\n        if type(of: lhs) != type(of: rhs) {\n            return false\n        }\n        if lhs.node != rhs.node {\n            return false\n        }\n        return true\n    }\n\n    var description: String {\n        \"TestPeer(\\(self.swimNode))\"\n    }\n}\n"
  },
  {
    "path": "dev/git.commit.template",
    "content": "# One line description of your change\n\n**Motivation:**\n\n# Explain here the context and why you're making that change. What is the problem you're trying to solve?\n\n**Modifications:**\n\n# Describe the modifications you've done.\n\n**Result:**\n\n- Resolves #\n"
  }
]