[
  {
    "path": ".devcontainer/devcontainer.json",
    "content": "{\n  \"$schema\": \"https://raw.githubusercontent.com/devcontainers/spec/main/schemas/devContainer.schema.json\",\n  \"name\": \"k3s Terraform module - Dev Container\",\n  \"image\": \"mcr.microsoft.com/vscode/devcontainers/universal\",\n  \"features\": {\n    \"ghcr.io/devcontainers-contrib/features/yamllint:2.0.9\": {},\n    \"ghcr.io/devcontainers/features/terraform:1.4.2\": {\n      \"version\": \"1.6.2\"\n    },\n    \"ghcr.io/devcontainers-contrib/features/go-task:1.0.5\": {},\n    \"ghcr.io/dhoeric/features/terraform-docs:1.0.0\": {\n      \"version\": \"0.16.0\"\n    },\n    \"ghcr.io/itsmechlark/features/act:1.0.0\": {},\n    \"ghcr.io/itsmechlark/features/trivy:1.0.0\": {}\n  },\n  \"customizations\": {\n    \"vscode\": {\n      \"extensions\": [\n        \"bierner.github-markdown-preview\",\n        \"github.copilot\",\n        \"ms-vscode.makefile-tools\",\n        \"redhat.vscode-yaml\",\n        \"tylerharris.terraform-link-docs\",\n        \"yzhang.markdown-all-in-one\",\n        \"task.vscode-task\"\n      ]\n    }\n  }\n}"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/bug-report.yaml",
    "content": "name: Bug Report\ndescription: File a bug report for this project\ntitle: \":bug: \"\nlabels: [\"kind/bug\"]\nprojects: [\"xunleii/2\"]\n\nbody:\n  - type: markdown\n    attributes:\n      value: |\n        Before opening a new issue, please search existing issues.\n\n        ----\n\n        Thank you for filing a bug report! Please fill out the sections below to help us reproduce the bug.\n\n  - type: textarea\n    id: what_happened\n    attributes:\n      label: \":fire: What happened?\"\n      description: Describe the issue you are experiencing here\n    validations:\n      required: true\n  - type: textarea\n    id: what_expected\n    attributes:\n      label: \":+1: What did you expect to happen?\"\n      description: Describe what you expected to happen here\n    validations:\n      required: false\n  - type: textarea\n    id: how_reproduce\n    attributes:\n      label: \":mag: How can we reproduce the issue?\"\n      description: Describe how to reproduce the problem in as much detail as possible\n    validations:\n      required: true\n\n  - type: input\n    id: module_version\n    attributes:\n      label: \":wrench: Module version\"\n      description: Please provide the version of the module you are using\n    validations:\n      required: true\n  - type: input\n    id: terraform_version\n    attributes:\n      label: \":wrench: Terraform version\"\n      description: Please provide the version of Terraform you are using\n    validations:\n      required: true\n\n  - type: textarea\n    id: provider_list\n    attributes:\n      label: \":wrench: Terraform providers\"\n      description: List all the providers you are using with their version (copy the output of `terraform providers`)\n    validations:\n      required: true\n\n  - type: textarea\n    id: additional_info\n    attributes:\n      label: \":clipboard: Additional information\"\n      description: Please provide any additional information that might be useful\n    
validations:\n      required: false\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/config.yaml",
    "content": "blank_issues_enabled: true\n"
  },
  {
    "path": ".github/labels.yaml",
    "content": "- name: kind/bug\n  description: Something isn't working\n  color: D73A4A\n- name: kind/dependencies\n  description: Dependencies upgrade\n  color: 2B098D\n- name: kind/documentation\n  description: Improvements or additions to documentation\n  color: 0075CA\n- name: kind/enhancement\n  description: New feature or request\n  color: A2EEEF\n- name: kind/question\n  description: Further information is requested\n  color: D876E3\n\n- name: size/XS\n  color: 008000\n- name: size/S\n  color: 008000\n- name: size/M\n  color: FFFF00\n- name: size/L\n  color: FF0000\n- name: size/XL\n  color: FF0000\n\n- name: status/stale\n  description: This issue has not had recent activity\n  color: 6A5ACD\n- name: no-stale\n  description: This issue cannot be marked as stale\n  color: 6A5ACD\n\n- name: terraform:plan\n  description: Invoke Terraform plan workflow on the current PR\n  color: 7A55CC\n\n- name: duplicate\n  description: This doesn't seem right\n  color: CFD3D7\n- name: good first issue\n  description: Good for newcomers\n  color: 7057FF\n- name: help wanted\n  description: Extra attention is needed\n  color: 008672\n- name: invalid\n  description: This doesn't seem right\n  color: E4E669\n- name: wontfix\n  description: This will not be worked on\n  color: FFFFFF\n"
  },
  {
    "path": ".github/renovate.json",
    "content": "{\n  \"$schema\": \"https://docs.renovatebot.com/renovate-schema.json\",\n\n  \"assignAutomerge\": true,\n  \"automergeStrategy\": \"auto\",\n  \"dependencyDashboard\": true,\n  \"labels\": [\"kind/dependencies\"],\n  \"prConcurrentLimit\": 5,\n  \"prHourlyLimit\": 0\n}\n"
  },
  {
    "path": ".github/workflows/github.documentation.yaml",
    "content": "---\nname: '[bot] Update documentation assets (master only)'\non:\n  push:\n    branches: [master]\n\njobs:\n  generate-docs-assets:\n    name: Update documentation assets\n    runs-on: ubuntu-latest\n    permissions:\n      contents: write\n    steps:\n      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1\n      - uses: heinrichreimer/github-changelog-generator-action@e60b5a2bd9fcd88dadf6345ff8327863fb8b490f # v2.4\n        with:\n          token: ${{ secrets.GITHUB_TOKEN }}\n      # NOTE: seems impossible to use terraform-docs/gh-actions with EndBug/add-and-commit... so\n      #       we will do everything manually\n      - name: Generate README.md with terraform-docs\n        run: |\n          mkdir --parent .terraform-docs\n          curl -L \"https://github.com/terraform-docs/terraform-docs/releases/download/v0.16.0/terraform-docs-v0.16.0-$(uname)-amd64.tar.gz\" | tar -xvzC .terraform-docs\n          chmod +x .terraform-docs/terraform-docs\n          \n          .terraform-docs/terraform-docs .\n      - uses: EndBug/add-and-commit@1bad3abcf0d6ec49a5857d124b0bfb52dc7bb081 # v9.1.3\n        with:\n          default_author: github_actions\n          message: \"Update documentation assets\"\n"
  },
  {
    "path": ".github/workflows/github.labeler.yaml",
    "content": "---\nname: '[bot] Synchronize labels'\non:\n  push:\n    branches: [master]\n    paths: [.github/workflows/github.labeler.yaml, .github/labels.yaml]\n  schedule:\n    - cron: '0 0 * * *'\n\njobs:\n  sync:\n    name: Synchronize labels\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1\n      - uses: micnncim/action-label-syncer@3abd5ab72fda571e69fffd97bd4e0033dd5f495c # v1.3.0\n        env:\n          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n        with:\n          manifest: .github/labels.yaml\n          prune: true\n"
  },
  {
    "path": ".github/workflows/github.stale.yaml",
    "content": "---\nname: '[bot] Close stale issues and PRs'\non:\n  schedule:\n    - cron: '0 0 * * *'\n\njobs:\n  stale:\n    runs-on: ubuntu-latest\n    permissions:\n      contents: write\n      issues: write\n      pull-requests: write\n    steps:\n      - uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9.0.0\n        with:\n          days-before-close: 7\n          days-before-stale: 30\n          exempt-issue-labels: no-stale\n          exempt-pr-labels: no-stale\n          repo-token: ${{ secrets.GITHUB_TOKEN }}\n          stale-issue-label: status/stale\n          stale-issue-message: 'This issue has been automatically marked as stale because it has not had recent activity. If the issue still persists, please leave a comment and it will be reopened.'\n          stale-pr-label: status/stale\n          stale-pr-message: 'This pull request has been automatically marked as stale because it has not had recent activity. If the pull request still needs attention, please leave a comment and it will be reopened.'\n"
  },
  {
    "path": ".github/workflows/security.terraform.yaml",
    "content": "name: Security hardening (Terraform)\n\non:\n  pull_request:\n\njobs:\n  trivy:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1\n      - uses: aquasecurity/trivy-action@d43c1f16c00cfd3978dde6c07f4bbcf9eb6993ca # 0.16.1\n        with:\n          scan-type: config\n          scan-ref: .\n          exit-code: 1\n          severity: UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL\n"
  },
  {
    "path": ".github/workflows/security.workflows.yaml",
    "content": "name: Security hardening (Github Actions workflows)\n\non:\n  pull_request:\n    types: [opened, synchronize]\n    paths: [\".github/workflows/**\"]\n\njobs:\n  ci_harden_security:\n    name: Github Action security hardening\n    runs-on: ubuntu-latest\n    permissions:\n      security-events: write\n    steps:\n      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1\n\n      - name: Lint your Github Actions\n        run: |\n          curl -O https://raw.githubusercontent.com/rhysd/actionlint/main/.github/actionlint-matcher.json\n\n          echo \"::add-matcher::actionlint-matcher.json\"\n          bash <(curl https://raw.githubusercontent.com/rhysd/actionlint/main/scripts/download-actionlint.bash)\n          ./actionlint -color\n\n      - name: Ensure SHA pinned actions\n        uses: zgosalvez/github-actions-ensure-sha-pinned-actions@70c4af2ed5282c51ba40566d026d6647852ffa3e # v5.0.1\n"
  },
  {
    "path": ".github/workflows/templates.terraform.pull_requests.lint.yaml",
    "content": "name: IaaS - Terraform CI (for pull requests) - Lint\n\non:\n  workflow_call:\n    inputs:\n      terraform_workdir:\n        description: Working directory where Terraform files are\n        required: false\n        default: \".\"\n        type: string\n      terraform_version:\n        description: Terraform version that should we use (latest by default)\n        required: false\n        type: string\n\njobs:\n  # Terraform validate checks if your TF files are in a canonical format and without HCL issues\n  terraform_validate:\n    name: Terraform files validation\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1\n      - uses: hashicorp/setup-terraform@5e8dbf3c6d9deaf4193ca7a8fb23f2ac83bb6c85 # v4.0.0\n        with:\n          terraform_version: ${{ inputs.terraform_version }}\n      - name: Pre-hook Terraform workflow\n        id: pre\n        run: |\n          # Setup `workdir` suffix used to give more information during execution\n          if [[ '${{ inputs.terraform_workdir }}' == '.' 
]]; then\n            echo \"workdir=\" >> \"${GITHUB_OUTPUT}\"\n          else\n            echo \"workdir=(${{ inputs.terraform_workdir }})\" >> \"${GITHUB_OUTPUT}\"\n          fi\n\n      # --- `terraform fmt`\n      - name: Check if all Terraform configuration files are in a canonical format ${{ steps.pre.outputs.workdir }}\n        id: fmt\n        run: terraform fmt -check -recursive -diff -no-color\n        working-directory: ${{ inputs.terraform_workdir }}\n      - uses: marocchino/sticky-pull-request-comment@67d0dec7b07ed060a405f9b2a64b8ab319fdd7db # v2.9.2\n        if: failure() && steps.fmt.outcome == 'failure'\n        with:\n          recreate: true\n          header: tf::${{ steps.pre.outputs.workdir }}\n          message: |\n            # Terraform CI/CD ${{ steps.pre.outputs.workdir }}\n\n            - [ ] :paintbrush: Check if all Terraform configuration files are in a canonical format\n\n            ### 🚫 Failure reason\n            ```terraform\n            ${{ steps.fmt.outputs.stdout }}\n            ```\n            <br/>\n\n            > _Report based on commit ${{ github.sha }} (authored by **@${{ github.actor }}**).  
See [`actions#${{ github.run_id }}`](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}) for more details._\n\n      # --- `terraform init`\n      - name: Initialize Terraform working directory ${{ steps.pre.outputs.workdir }}\n        id: init\n        env:\n          TF_IN_AUTOMATION: yes\n        run: terraform init -no-color -backend=false\n        working-directory: ${{ inputs.terraform_workdir }}\n      - uses: marocchino/sticky-pull-request-comment@67d0dec7b07ed060a405f9b2a64b8ab319fdd7db # v2.9.2\n        if: failure() && steps.init.outcome == 'failure'\n        with:\n          recreate: true\n          header: tf::${{ steps.pre.outputs.workdir }}\n          message: |\n            # Terraform CI/CD ${{ steps.pre.outputs.workdir }}\n\n            - [x] :paintbrush: Check if all Terraform configuration files are in a canonical format\n            - [ ] :hammer_and_wrench: Validate the configuration files\n\n            ### 🚫 Failure reason\n            ```\n            ${{ steps.init.outputs.stderr }}\n            ```\n            <br/>\n\n            > _Report based on commit ${{ github.sha }} (authored by **@${{ github.actor }}**).  
See [`actions#${{ github.run_id }}`](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}) for more details._\n\n      # --- `terraform validate`\n      - name: Validate the configuration files ${{ steps.pre.outputs.workdir }}\n        id: validate\n        env:\n          TF_IN_AUTOMATION: yes\n        run: terraform validate -no-color\n        working-directory: ${{ inputs.terraform_workdir }}\n      - uses: marocchino/sticky-pull-request-comment@67d0dec7b07ed060a405f9b2a64b8ab319fdd7db # v2.9.2\n        if: failure() && steps.validate.outcome == 'failure'\n        with:\n          recreate: true\n          header: tf::${{ steps.pre.outputs.workdir }}\n          message: |\n            # Terraform CI/CD ${{ steps.pre.outputs.workdir }}\n\n            - [x] :paintbrush: Check if all Terraform configuration files are in a canonical format\n            - [ ] :hammer_and_wrench: Validate the configuration files\n\n            ### 🚫 Failure reason\n            ```\n            ${{ steps.validate.outputs.stderr }}\n            ```\n            <br/>\n\n            > _Report based on commit ${{ github.sha }} (authored by **@${{ github.actor }}**).  See [`actions#${{ github.run_id }}`](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}) for more details._\n      - uses: marocchino/sticky-pull-request-comment@67d0dec7b07ed060a405f9b2a64b8ab319fdd7db # v2.9.2\n        if: success()\n        with:\n          recreate: true\n          header: tf::${{ steps.pre.outputs.workdir }}\n          message: |\n            # Terraform CI/CD ${{ steps.pre.outputs.workdir }}\n\n            - [x] :paintbrush: Check if all Terraform configuration files are in a canonical format\n            - [x] :hammer_and_wrench: Validate the configuration files\n\n            > _Report based on commit ${{ github.sha }} (authored by **@${{ github.actor }}**).  
See [`actions#${{ github.run_id }}`](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}) for more details._"
  },
  {
    "path": ".github/workflows/templates.terraform.pull_requests.plan.yaml",
    "content": "name: IaaS - Terraform CI (for pull requests) - Plan\n\non:\n  workflow_call:\n    inputs:\n      after_lint:\n        default: true\n        description: Is this workflow run after lint?\n        required: false\n        type: boolean\n\n      env:\n        description: List of environment variables to set (YAML formatted)\n        required: false\n        type: string\n\n      terraform_vars:\n        description: Terraform variables (YAML formatted)\n        required: false\n        type: string\n\n      terraform_version:\n        description: Terraform version that should we use (latest by default)\n        required: false\n        type: string\n      terraform_workdir:\n        description: Working directory where Terraform files are\n        required: false\n        default: \".\"\n        type: string\n\n    secrets:\n      env:\n        description: List of sensitive environment variables to set (YAML formatted)\n        required: false\n\n      terraform_vars:\n        description: Sensitive Terraform variables (YAML formatted)\n        required: false\n\njobs:\n  # Terraform plan generated the speculative execution plan\n  terraform_plan:\n    name: Generate a speculative execution plan\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1\n      - uses: hashicorp/setup-terraform@5e8dbf3c6d9deaf4193ca7a8fb23f2ac83bb6c85 # v4.0.0\n        with:\n          terraform_version: ${{ inputs.terraform_version }}\n      - name: Pre-hook Terraform workflow\n        id: pre\n        run: |\n          import os\n          import yaml\n\n          # Setup `workdir` suffix used to give more information during execution\n\n          with open(os.getenv('GITHUB_OUTPUT'), 'a') as output:\n            if '${{ inputs.terraform_workdir }}' == '.':\n              output.write('workdir=\\n')\n            else:\n              output.write('workdir=(${{ inputs.terraform_workdir }})\\n')\n\n      
      if '${{ inputs.after_lint }}' == 'true':\n              output.write('- [x] :paintbrush: Check if all Terraform configuration files are in a canonical format\\n')\n              output.write('- [x] :hammer_and_wrench: Validate the configuration files\\n')\n            else:\n              output.write('lint_fmt_success=\"\"\\n')\n              output.write('lint_val_success=\"\"\\n')\n\n          # Import Terraform variables\n  \n          tf_env = '''\n          ${{ inputs.env }}\n          ${{ secrets.env }}\n          '''\n\n          tf_vars = '''\n          ${{ inputs.terraform_vars }}\n          ${{ secrets.terraform_vars }}\n          '''\n\n          with open(os.getenv('GITHUB_ENV'), 'a') as env:\n            if tf_env.strip():\n              for var in yaml.safe_load(tf_env).items():\n                env.write('%s=%s\\n' % var)\n            if tf_vars.strip():\n              for var in yaml.safe_load(tf_vars).items():\n                env.write('TF_VAR_%s=%s\\n' % var)\n        shell: python\n\n      # --- `terraform init`\n      - name: Initialize Terraform working directory ${{ steps.pre.outputs.workdir }}\n        id: init\n        env:\n          TF_IN_AUTOMATION: yes\n        run: terraform init -no-color -backend=false\n        working-directory: ${{ inputs.terraform_workdir }}\n      - uses: marocchino/sticky-pull-request-comment@67d0dec7b07ed060a405f9b2a64b8ab319fdd7db # v2.9.2\n        if: failure() && steps.init.outcome == 'failure'\n        with:\n          recreate: true\n          header: tf::${{ steps.pre.outputs.workdir }}\n          message: |\n            # Terraform CI/CD ${{ steps.pre.outputs.workdir }}\n\n            ${{ steps.pre.outputs.lint_fmt_success }}\n            ${{ steps.pre.outputs.lint_val_success }}\n            - [ ] :scroll: Generate a speculative execution plan\n\n            ### 🚫 Failure reason\n            ```\n            ${{ steps.init.outputs.stderr }}\n            ```\n            <br/>\n\n            > 
_Report based on commit ${{ github.sha }} (authored by **@${{ github.actor }}**).  See [`actions#${{ github.run_id }}`](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}) for more details._\n\n      # --- `terraform plan`\n      - name: Generate a speculative execution plan ${{ steps.pre.outputs.workdir }}\n        id: plan\n        env:\n          TF_IN_AUTOMATION: yes\n        run: terraform plan -input=false -no-color -parallelism=30 -compact-warnings\n        working-directory: ${{ inputs.terraform_workdir }}\n      - uses: marocchino/sticky-pull-request-comment@67d0dec7b07ed060a405f9b2a64b8ab319fdd7db # v2.9.2\n        if: failure() && steps.plan.outcome == 'failure'\n        with:\n          recreate: true\n          header: tf::${{ steps.pre.outputs.workdir }}\n          message: |\n            # Terraform CI/CD ${{ steps.pre.outputs.workdir }}\n\n            ${{ steps.pre.outputs.lint_fmt_success }}\n            ${{ steps.pre.outputs.lint_val_success }}\n            - [ ] :scroll: Generate a speculative execution plan\n\n            ### 🚫 Failure reason\n            ```\n            ${{ steps.plan.outputs.stderr }}\n            ```\n            <br/>\n\n            > _Report based on commit ${{ github.sha }} (authored by **@${{ github.actor }}**).  
See [`actions#${{ github.run_id }}`](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}) for more details._\n      - uses: marocchino/sticky-pull-request-comment@67d0dec7b07ed060a405f9b2a64b8ab319fdd7db # v2.9.2\n        if: success()\n        with:\n          recreate: true\n          header: tf::${{ steps.pre.outputs.workdir }}\n          message: |\n            # Terraform CI/CD ${{ steps.pre.outputs.workdir }}\n\n            ${{ steps.pre.outputs.lint_fmt_success }}\n            ${{ steps.pre.outputs.lint_val_success }}\n            - [x] :scroll: Generate a speculative execution plan\n\n            ### Terraform Plan output\n            ```terraform\n            ${{ steps.plan.outputs.stdout }}\n            ```\n            <br/>\n\n            > _Report based on commit ${{ github.sha }} (authored by **@${{ github.actor }}**).  See [`actions#${{ github.run_id }}`](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}) for more details._"
  },
  {
    "path": ".github/workflows/terraform.lint.yaml",
    "content": "name: Terraform HCL validation (PRs only)\n\non:\n  pull_request:\n    paths: [\"**.tf\"]\n\npermissions:\n  pull-requests: write\n\njobs:\n  terraform-module-k3s:\n    name: Terraform module\n    uses: ./.github/workflows/templates.terraform.pull_requests.lint.yaml\n\n  examples_hcloud-k3s:\n    name: Hetzner Cloud\n    needs: [terraform-module-k3s]\n    uses: ./.github/workflows/templates.terraform.pull_requests.lint.yaml\n    with:\n      terraform_workdir: examples/hcloud-k3s\n\n  examples_civo-k3s:\n    name: CIVO\n    needs: [terraform-module-k3s]\n    uses: ./.github/workflows/templates.terraform.pull_requests.lint.yaml\n    with:\n      terraform_workdir: examples/civo-k3s\n"
  },
  {
    "path": ".github/workflows/terraform.plan.yaml",
    "content": "name: Terraform plan validation (PRs only)\n\non:\n  pull_request:\n    types: [labeled]\n\npermissions:\n  pull-requests: write\n\njobs:\n  examples_hcloud-k3s:\n    name: Hetzner Cloud\n    if: ${{ github.event.label.name == 'terraform:plan' }}\n    permissions:\n      pull-requests: write\n    secrets:\n      env: |\n        HCLOUD_TOKEN: ${{ secrets.HCLOUD_TOKEN }}\n    uses: ./.github/workflows/templates.terraform.pull_requests.plan.yaml\n    with:\n      terraform_vars: |\n        ssh_key: ''\n      terraform_workdir: examples/hcloud-k3s\n\n  unlabel-pull-request:\n    if: always()\n    name: Remove 'terraform:plan' label\n    needs: [examples_hcloud-k3s]\n    runs-on: ubuntu-latest\n    steps:\n      - name: Unlabel 'terraform:plan'\n        uses: actions-ecosystem/action-remove-labels@d05162525702062b6bdef750ed8594fc024b3ed7\n        with:\n          labels: terraform:plan\n"
  },
  {
    "path": ".github_changelog_generator",
    "content": "add-sections={\"dependencies\":{\"prefix\":\"**Dependencies upgrades:**\", \"labels\":[\"kind/dependencies\"]}}\nproject=terraform-module-k3s\nuser=xunleii\n"
  },
  {
    "path": ".gitignore",
    "content": "# Created by https://www.toptal.com/developers/gitignore/api/linux,windows,macos,terraform\n# Edit at https://www.toptal.com/developers/gitignore?templates=linux,windows,macos,terraform\n\n### Linux ###\n*~\n\n# temporary files which can be created if a process still has a handle open of a deleted file\n.fuse_hidden*\n\n# KDE directory preferences\n.directory\n\n# Linux trash folder which might appear on any partition or disk\n.Trash-*\n\n# .nfs files are created when an open file is removed but is still being accessed\n.nfs*\n\n### macOS ###\n# General\n.DS_Store\n.AppleDouble\n.LSOverride\n\n# Icon must end with two \\r\nIcon\n\n\n# Thumbnails\n._*\n\n# Files that might appear in the root of a volume\n.DocumentRevisions-V100\n.fseventsd\n.Spotlight-V100\n.TemporaryItems\n.Trashes\n.VolumeIcon.icns\n.com.apple.timemachine.donotpresent\n\n# Directories potentially created on remote AFP share\n.AppleDB\n.AppleDesktop\nNetwork Trash Folder\nTemporary Items\n.apdisk\n\n### macOS Patch ###\n# iCloud generated files\n*.icloud\n\n### Terraform ###\n# Local .terraform directories\n**/.terraform/*\n\n# .tfstate files\n*.tfstate\n*.tfstate.*\n\n# Crash log files\ncrash.log\ncrash.*.log\n\n# Exclude all .tfvars files, which are likely to contain sensitive data, such as\n# password, private keys, and other secrets. 
These should not be part of version\n# control as they are data points which are potentially sensitive and subject\n# to change depending on the environment.\n*.tfvars\n*.tfvars.json\n\n# Ignore override files as they are usually used to override resources locally and so\n# are not checked in\noverride.tf\noverride.tf.json\n*_override.tf\n*_override.tf.json\n\n# Include override files you do wish to add to version control using negated pattern\n# !example_override.tf\n\n# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan\n# example: *tfplan*\n\n# Ignore CLI configuration files\n.terraformrc\nterraform.rc\n\n# Ignore temporary TF docs folder\n.terraform-docs/\n\n### Windows ###\n# Windows thumbnail cache files\nThumbs.db\nThumbs.db:encryptable\nehthumbs.db\nehthumbs_vista.db\n\n# Dump file\n*.stackdump\n\n# Folder config file\n[Dd]esktop.ini\n\n# Recycle Bin used on file shares\n$RECYCLE.BIN/\n\n# Windows Installer files\n*.cab\n*.msi\n*.msix\n*.msm\n*.msp\n\n# Windows shortcuts\n*.lnk\n\n# End of https://www.toptal.com/developers/gitignore/api/linux,windows,macos,terraform\n\n### Taskfile ###\n# Ignore taskfile generated files\n.task/\n"
  },
  {
    "path": ".terraform-docs.yaml",
    "content": "formatter: \"markdown table\"\n\ncontent: |-\n  ## Example _(based on [Hetzner Cloud example](examples/hcloud-k3s))_\n\n  ```hcl\n  {{ include \"examples/hcloud-k3s/k3s.tf\" | replace \"./../..\" \"xunleii/k3s/module\" }}\n  ```\n\n  {{ .Inputs | replace \"\\\"|\\\"\" \"\\\"\\\\|\\\"\" }}\n\n  {{ .Outputs }}\n\n  {{ .Providers }}\n\noutput:\n  file: README.md\n  mode: inject\n  template: |-\n    <!-- BEGIN_TF_DOCS -->\n    {{ .Content }}\n    <!-- END_TF_DOCS -->\n\nsort:\n  enabled: true\n  by: required\n"
  },
  {
    "path": ".tool-versions",
    "content": "act 0.2.57\ntask 3.31.0\nterraform 1.14.6\nterraform-docs 0.20.0\ntrivy 0.69.3\n"
  },
  {
    "path": "CHANGELOG.md",
    "content": "# Changelog\n\n## [Unreleased](https://github.com/xunleii/terraform-module-k3s/tree/HEAD)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v3.4.0...HEAD)\n\n**Dependencies upgrades:**\n\n- chore\\(deps\\): update hashicorp/setup-terraform action to v4 [\\#225](https://github.com/xunleii/terraform-module-k3s/pull/225) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update zgosalvez/github-actions-ensure-sha-pinned-actions action to v5 [\\#223](https://github.com/xunleii/terraform-module-k3s/pull/223) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update dependency trivy to v0.69.3 [\\#221](https://github.com/xunleii/terraform-module-k3s/pull/221) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update dependency terraform to v1.14.6 [\\#214](https://github.com/xunleii/terraform-module-k3s/pull/214) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update ghcr.io/devcontainers/features/terraform docker tag to v1.4.2 [\\#213](https://github.com/xunleii/terraform-module-k3s/pull/213) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update marocchino/sticky-pull-request-comment action to v2.9.2 [\\#203](https://github.com/xunleii/terraform-module-k3s/pull/203) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update dependency terraform-docs to v0.20.0 [\\#202](https://github.com/xunleii/terraform-module-k3s/pull/202) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update zgosalvez/github-actions-ensure-sha-pinned-actions action to v3.0.23 - autoclosed [\\#201](https://github.com/xunleii/terraform-module-k3s/pull/201) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update ghcr.io/devcontainers/features/terraform docker tag to v1.3.10 [\\#200](https://github.com/xunleii/terraform-module-k3s/pull/200) 
([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update dependency terraform-docs to v0.17.0 [\\#163](https://github.com/xunleii/terraform-module-k3s/pull/163) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update actions/stale action to v9 [\\#162](https://github.com/xunleii/terraform-module-k3s/pull/162) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update heinrichreimer/github-changelog-generator-action action to v2.4 [\\#161](https://github.com/xunleii/terraform-module-k3s/pull/161) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update dependency trivy to v0.48.2 [\\#160](https://github.com/xunleii/terraform-module-k3s/pull/160) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update aquasecurity/trivy-action action to v0.16.1 [\\#159](https://github.com/xunleii/terraform-module-k3s/pull/159) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update dependency act to v0.2.57 [\\#158](https://github.com/xunleii/terraform-module-k3s/pull/158) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update zgosalvez/github-actions-ensure-sha-pinned-actions action to v3.0.3 [\\#157](https://github.com/xunleii/terraform-module-k3s/pull/157) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update dependency terraform to v1.6.6 [\\#156](https://github.com/xunleii/terraform-module-k3s/pull/156) ([renovate[bot]](https://github.com/apps/renovate))\n\n**Closed issues:**\n\n- :unicorn: Add Certificate data as outputs [\\#182](https://github.com/xunleii/terraform-module-k3s/issues/182)\n- Servers must have an odd number of nodes [\\#172](https://github.com/xunleii/terraform-module-k3s/issues/172)\n- :bug:  Cannot scale up server nodes  [\\#153](https://github.com/xunleii/terraform-module-k3s/issues/153)\n- Taking a node out of the configuration keeps the node within the cluster but cordoned 
[\\#139](https://github.com/xunleii/terraform-module-k3s/issues/139)\n- Consider Integration Testing with k3d [\\#133](https://github.com/xunleii/terraform-module-k3s/issues/133)\n- K3s Cluster Node\\(s\\) Upgrade [\\#132](https://github.com/xunleii/terraform-module-k3s/issues/132)\n- Unable to use on Windows Terraform [\\#95](https://github.com/xunleii/terraform-module-k3s/issues/95)\n\n**Merged pull requests:**\n\n- let k8s\\_ca\\_certificates\\_install depend on var.depends\\_on\\_ [\\#164](https://github.com/xunleii/terraform-module-k3s/pull/164) ([sschaeffner](https://github.com/sschaeffner))\n\n## [v3.4.0](https://github.com/xunleii/terraform-module-k3s/tree/v3.4.0) (2023-11-26)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v3.3.0...v3.4.0)\n\n**Dependencies upgrades:**\n\n- chore\\(deps\\): update dependency terraform to v1.6.4 [\\#154](https://github.com/xunleii/terraform-module-k3s/pull/154) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update zgosalvez/github-actions-ensure-sha-pinned-actions action to v3 [\\#152](https://github.com/xunleii/terraform-module-k3s/pull/152) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update hashicorp/setup-terraform action to v3 [\\#151](https://github.com/xunleii/terraform-module-k3s/pull/151) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update zgosalvez/github-actions-ensure-sha-pinned-actions action to v1.4.1 [\\#150](https://github.com/xunleii/terraform-module-k3s/pull/150) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update marocchino/sticky-pull-request-comment action to v2.8.0 [\\#149](https://github.com/xunleii/terraform-module-k3s/pull/149) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update dependency trivy to v0.47.0 [\\#148](https://github.com/xunleii/terraform-module-k3s/pull/148) ([renovate[bot]](https://github.com/apps/renovate))\n- 
chore\\(deps\\): update aquasecurity/trivy-action action to v0.14.0 [\\#147](https://github.com/xunleii/terraform-module-k3s/pull/147) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update hashicorp/setup-terraform action to v2.0.3 [\\#146](https://github.com/xunleii/terraform-module-k3s/pull/146) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update dependency terraform to v1.6.3 [\\#145](https://github.com/xunleii/terraform-module-k3s/pull/145) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update actions/checkout action to v4 [\\#137](https://github.com/xunleii/terraform-module-k3s/pull/137) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update terraform http to v3.4.0 [\\#130](https://github.com/xunleii/terraform-module-k3s/pull/130) ([renovate[bot]](https://github.com/apps/renovate))\n\n**Closed issues:**\n\n- When generate\\_ca\\_certificates = false, module does not export any kubeconfig [\\#143](https://github.com/xunleii/terraform-module-k3s/issues/143)\n- Refresh kubeconfig when terraform state is lost [\\#142](https://github.com/xunleii/terraform-module-k3s/issues/142)\n- terraform destroy gets stuck while draining the last node [\\#138](https://github.com/xunleii/terraform-module-k3s/issues/138)\n- cdktf compatibility  [\\#135](https://github.com/xunleii/terraform-module-k3s/issues/135)\n- hcloud-k3s doesnt work with v3.3.0 [\\#127](https://github.com/xunleii/terraform-module-k3s/issues/127)\n- Generated kubeconfig cannot be used \\(certificate signed by unknown authority\\) [\\#107](https://github.com/xunleii/terraform-module-k3s/issues/107)\n- Cluster CA certificate is not trusted [\\#85](https://github.com/xunleii/terraform-module-k3s/issues/85)\n- Windows Terraform - SSH authentication failed [\\#43](https://github.com/xunleii/terraform-module-k3s/issues/43)\n- Custom k3s cluster name inside of the admin kubeconfig  
[\\#144](https://github.com/xunleii/terraform-module-k3s/issues/144)\n- 🚧 Refresh this repository [\\#140](https://github.com/xunleii/terraform-module-k3s/issues/140)\n- Error \"Variable `name` is deprecated\" [\\#136](https://github.com/xunleii/terraform-module-k3s/issues/136)\n\n**Merged pull requests:**\n\n- :recycle: Cleanup this repository [\\#141](https://github.com/xunleii/terraform-module-k3s/pull/141) ([xunleii](https://github.com/xunleii))\n- fix--multi\\_server-install [\\#131](https://github.com/xunleii/terraform-module-k3s/pull/131) ([jacaudi](https://github.com/jacaudi))\n- Fix k3s\\_install\\_env\\_vars and hcloud-k3s example issues [\\#128](https://github.com/xunleii/terraform-module-k3s/pull/128) ([xunleii](https://github.com/xunleii))\n\n## [v3.3.0](https://github.com/xunleii/terraform-module-k3s/tree/v3.3.0) (2023-05-14)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v3.2.0...v3.3.0)\n\n**Dependencies upgrades:**\n\n- chore\\(deps\\): update endbug/add-and-commit action to v9.1.3 [\\#123](https://github.com/xunleii/terraform-module-k3s/pull/123) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update terraform http to v3.3.0 [\\#122](https://github.com/xunleii/terraform-module-k3s/pull/122) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update actions/checkout action to v3.5.2 [\\#121](https://github.com/xunleii/terraform-module-k3s/pull/121) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update terraform random to v3.5.1 [\\#120](https://github.com/xunleii/terraform-module-k3s/pull/120) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update actions/checkout action to v3.5.0 [\\#119](https://github.com/xunleii/terraform-module-k3s/pull/119) ([renovate[bot]](https://github.com/apps/renovate))\n- Update actions/checkout action to v3.4.0 [\\#118](https://github.com/xunleii/terraform-module-k3s/pull/118) 
([renovate[bot]](https://github.com/apps/renovate))\n- Update actions/checkout action to v3.3.0 [\\#108](https://github.com/xunleii/terraform-module-k3s/pull/108) ([renovate[bot]](https://github.com/apps/renovate))\n- Update xunleii/github-actions-grimoire digest to 0ab2cd9 [\\#106](https://github.com/xunleii/terraform-module-k3s/pull/106) ([renovate[bot]](https://github.com/apps/renovate))\n- Update actions/checkout action to v3.1.0 [\\#105](https://github.com/xunleii/terraform-module-k3s/pull/105) ([renovate[bot]](https://github.com/apps/renovate))\n- Update EndBug/add-and-commit action to v9.1.1 [\\#102](https://github.com/xunleii/terraform-module-k3s/pull/102) ([renovate[bot]](https://github.com/apps/renovate))\n- Update Terraform http to v3 [\\#101](https://github.com/xunleii/terraform-module-k3s/pull/101) ([renovate[bot]](https://github.com/apps/renovate))\n- Update Terraform tls to v4 [\\#100](https://github.com/xunleii/terraform-module-k3s/pull/100) ([renovate[bot]](https://github.com/apps/renovate))\n\n**Closed issues:**\n\n- API URL broken in build script when using dual stack configs [\\#111](https://github.com/xunleii/terraform-module-k3s/issues/111)\n- Deprecated attribute with Terraform 1.3.7 [\\#110](https://github.com/xunleii/terraform-module-k3s/issues/110)\n- Error: Invalid Attribute Value Match  [\\#104](https://github.com/xunleii/terraform-module-k3s/issues/104)\n\n**Merged pull requests:**\n\n- Update workflows generating documentation assets [\\#125](https://github.com/xunleii/terraform-module-k3s/pull/125) ([xunleii](https://github.com/xunleii))\n- feat\\(k3s\\_env\\_vars\\): introduce k3s\\_install\\_env\\_vars [\\#124](https://github.com/xunleii/terraform-module-k3s/pull/124) ([FalcoSuessgott](https://github.com/FalcoSuessgott))\n- Dual-stack & IPv6 fixes [\\#113](https://github.com/xunleii/terraform-module-k3s/pull/113) ([djh00t](https://github.com/djh00t))\n- Update providers and fix \\#110 
[\\#112](https://github.com/xunleii/terraform-module-k3s/pull/112) ([xunleii](https://github.com/xunleii))\n- Add support for INSTALL\\_K3S\\_SELINUX\\_WARN [\\#109](https://github.com/xunleii/terraform-module-k3s/pull/109) ([hobbypunk90](https://github.com/hobbypunk90))\n\n## [v3.2.0](https://github.com/xunleii/terraform-module-k3s/tree/v3.2.0) (2022-10-18)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v3.1.0...v3.2.0)\n\n**Dependencies upgrades:**\n\n- Update actions-ecosystem/action-remove-labels digest to d051625 [\\#103](https://github.com/xunleii/terraform-module-k3s/pull/103) ([renovate[bot]](https://github.com/apps/renovate))\n- Update EndBug/add-and-commit action to v9.0.1 [\\#99](https://github.com/xunleii/terraform-module-k3s/pull/99) ([renovate[bot]](https://github.com/apps/renovate))\n- Update xunleii/github-actions-grimoire digest to 42f3d38 [\\#98](https://github.com/xunleii/terraform-module-k3s/pull/98) ([renovate[bot]](https://github.com/apps/renovate))\n- Update actions/checkout action to v3 [\\#97](https://github.com/xunleii/terraform-module-k3s/pull/97) ([renovate[bot]](https://github.com/apps/renovate))\n- Update EndBug/add-and-commit action to v9 [\\#94](https://github.com/xunleii/terraform-module-k3s/pull/94) ([renovate[bot]](https://github.com/apps/renovate))\n- Update Hetzner Cloud example [\\#93](https://github.com/xunleii/terraform-module-k3s/pull/93) ([xunleii](https://github.com/xunleii))\n- Update actions/checkout action to v2.4.2 [\\#89](https://github.com/xunleii/terraform-module-k3s/pull/89) ([renovate[bot]](https://github.com/apps/renovate))\n- Update xunleii/github-actions-grimoire digest to 7b2b767 [\\#87](https://github.com/xunleii/terraform-module-k3s/pull/87) ([renovate[bot]](https://github.com/apps/renovate))\n- Update actions/checkout action to v3 [\\#86](https://github.com/xunleii/terraform-module-k3s/pull/86) ([renovate[bot]](https://github.com/apps/renovate))\n\n**Closed issues:**\n\n- Error 
sensitive var.servers [\\#84](https://github.com/xunleii/terraform-module-k3s/issues/84)\n- Publish a new version on the Terraform registry  [\\#79](https://github.com/xunleii/terraform-module-k3s/issues/79)\n\n**Merged pull requests:**\n\n- fix: typo in variables.tf [\\#96](https://github.com/xunleii/terraform-module-k3s/pull/96) ([Tchoupinax](https://github.com/Tchoupinax))\n- Fix some Github Action issues [\\#92](https://github.com/xunleii/terraform-module-k3s/pull/92) ([xunleii](https://github.com/xunleii))\n- Reenable auto changelog generation [\\#91](https://github.com/xunleii/terraform-module-k3s/pull/91) ([xunleii](https://github.com/xunleii))\n- Add missing permission on github actions workflow [\\#90](https://github.com/xunleii/terraform-module-k3s/pull/90) ([xunleii](https://github.com/xunleii))\n- addressing changes in recent hashicorp tls provider [\\#88](https://github.com/xunleii/terraform-module-k3s/pull/88) ([ptu](https://github.com/ptu))\n- Generate Changelog automatically [\\#82](https://github.com/xunleii/terraform-module-k3s/pull/82) ([xunleii](https://github.com/xunleii))\n\n## [v3.1.0](https://github.com/xunleii/terraform-module-k3s/tree/v3.1.0) (2022-01-04)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v3.0.0...v3.1.0)\n\n**Dependencies upgrades:**\n\n- chore\\(deps\\): update commitlint monorepo \\(major\\) [\\#78](https://github.com/xunleii/terraform-module-k3s/pull/78) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update actions/checkout action to v2.4.0 [\\#77](https://github.com/xunleii/terraform-module-k3s/pull/77) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update commitlint monorepo to v15 \\(major\\) [\\#76](https://github.com/xunleii/terraform-module-k3s/pull/76) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update zgosalvez/github-actions-ensure-sha-pinned-actions action to v1.1.1 
[\\#75](https://github.com/xunleii/terraform-module-k3s/pull/75) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update dependency husky to v7.0.4 [\\#74](https://github.com/xunleii/terraform-module-k3s/pull/74) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update marocchino/sticky-pull-request-comment action to v2.2.0 [\\#73](https://github.com/xunleii/terraform-module-k3s/pull/73) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update actions/checkout action to v2.3.5 [\\#72](https://github.com/xunleii/terraform-module-k3s/pull/72) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update wagoid/commitlint-github-action action to v4.1.9 [\\#71](https://github.com/xunleii/terraform-module-k3s/pull/71) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update dependency @commitlint/cli to v13.2.1 [\\#70](https://github.com/xunleii/terraform-module-k3s/pull/70) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update marocchino/sticky-pull-request-comment action to v2.1.1 [\\#68](https://github.com/xunleii/terraform-module-k3s/pull/68) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update terraform random to v3 [\\#65](https://github.com/xunleii/terraform-module-k3s/pull/65) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update terraform null to v3 [\\#64](https://github.com/xunleii/terraform-module-k3s/pull/64) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update terraform http to v2 [\\#63](https://github.com/xunleii/terraform-module-k3s/pull/63) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update dependency husky to v7 [\\#62](https://github.com/xunleii/terraform-module-k3s/pull/62) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): update commitlint monorepo to v13 \\(major\\) 
[\\#61](https://github.com/xunleii/terraform-module-k3s/pull/61) ([renovate[bot]](https://github.com/apps/renovate))\n- chore\\(deps\\): pin dependencies [\\#58](https://github.com/xunleii/terraform-module-k3s/pull/58) ([renovate[bot]](https://github.com/apps/renovate))\n\n**Merged pull requests:**\n\n- Remove commit lint dependencies [\\#81](https://github.com/xunleii/terraform-module-k3s/pull/81) ([xunleii](https://github.com/xunleii))\n- Output the Kubernetes cluster secret [\\#80](https://github.com/xunleii/terraform-module-k3s/pull/80) ([orf](https://github.com/orf))\n- Add Hacktoberfest labels [\\#69](https://github.com/xunleii/terraform-module-k3s/pull/69) ([xunleii](https://github.com/xunleii))\n- Rewrite CI/CD workflows [\\#67](https://github.com/xunleii/terraform-module-k3s/pull/67) ([xunleii](https://github.com/xunleii))\n- Add new use\\_sudo input to the documentation [\\#66](https://github.com/xunleii/terraform-module-k3s/pull/66) ([Corwind](https://github.com/Corwind))\n- add option to use kubectl with sudo [\\#57](https://github.com/xunleii/terraform-module-k3s/pull/57) ([Corwind](https://github.com/Corwind))\n- Configure Renovate [\\#56](https://github.com/xunleii/terraform-module-k3s/pull/56) ([renovate[bot]](https://github.com/apps/renovate))\n- Fix civo example [\\#55](https://github.com/xunleii/terraform-module-k3s/pull/55) ([debovema](https://github.com/debovema))\n\n## [v3.0.0](https://github.com/xunleii/terraform-module-k3s/tree/v3.0.0) (2021-06-27)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v2.2.4...v3.0.0)\n\n**Closed issues:**\n\n- rename variable name to cluster\\_domain [\\#53](https://github.com/xunleii/terraform-module-k3s/issues/53)\n- Pod and Service cidrs must be passed on all masters \\(not just the 1st one\\) [\\#52](https://github.com/xunleii/terraform-module-k3s/issues/52)\n- Hetzner example doesn't work [\\#50](https://github.com/xunleii/terraform-module-k3s/issues/50)\n- mkdir: cannot create 
directory ‘/var/lib/rancher’: Permission denied [\\#42](https://github.com/xunleii/terraform-module-k3s/issues/42)\n\n**Merged pull requests:**\n\n- Resolve issues \\#52 & \\#53 [\\#54](https://github.com/xunleii/terraform-module-k3s/pull/54) ([xunleii](https://github.com/xunleii))\n\n## [v2.2.4](https://github.com/xunleii/terraform-module-k3s/tree/v2.2.4) (2021-04-30)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v2.2.3...v2.2.4)\n\n**Closed issues:**\n\n- Failed to join the cluster with the same name [\\#26](https://github.com/xunleii/terraform-module-k3s/issues/26)\n\n**Merged pull requests:**\n\n- Enhancing 'Hetzner example' docs [\\#51](https://github.com/xunleii/terraform-module-k3s/pull/51) ([NicoWde](https://github.com/NicoWde))\n- Add support for provisioning without logging in as root [\\#49](https://github.com/xunleii/terraform-module-k3s/pull/49) ([caleb-devops](https://github.com/caleb-devops))\n\n## [v2.2.3](https://github.com/xunleii/terraform-module-k3s/tree/v2.2.3) (2021-02-17)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v2.2.2...v2.2.3)\n\n**Merged pull requests:**\n\n- fix: add \\*\\_drain to kubernetes\\_ready [\\#48](https://github.com/xunleii/terraform-module-k3s/pull/48) ([xunleii](https://github.com/xunleii))\n\n## [v2.2.2](https://github.com/xunleii/terraform-module-k3s/tree/v2.2.2) (2021-02-13)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v2.2.1...v2.2.2)\n\n**Merged pull requests:**\n\n- feat: add dependency endpoint to allow sychronizing k3s install & provisionning [\\#47](https://github.com/xunleii/terraform-module-k3s/pull/47) ([xunleii](https://github.com/xunleii))\n\n## [v2.2.1](https://github.com/xunleii/terraform-module-k3s/tree/v2.2.1) (2021-02-10)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v2.2.0...v2.2.1)\n\n**Closed issues:**\n\n- failed to start k3s node with label `node-role.kubernetes.io/***` 
[\\#45](https://github.com/xunleii/terraform-module-k3s/issues/45)\n- register: metadata.name: Invalid value [\\#44](https://github.com/xunleii/terraform-module-k3s/issues/44)\n- Fix this stupid CI [\\#38](https://github.com/xunleii/terraform-module-k3s/issues/38)\n\n**Merged pull requests:**\n\n- fix: correct some installation issues \\(\\#44 & \\#45\\) [\\#46](https://github.com/xunleii/terraform-module-k3s/pull/46) ([xunleii](https://github.com/xunleii))\n- Generate Kubeconfig file [\\#37](https://github.com/xunleii/terraform-module-k3s/pull/37) ([guitcastro](https://github.com/guitcastro))\n- removed missing additional\\_flags from readme [\\#36](https://github.com/xunleii/terraform-module-k3s/pull/36) ([guitcastro](https://github.com/guitcastro))\n- doc: update README [\\#35](https://github.com/xunleii/terraform-module-k3s/pull/35) ([xunleii](https://github.com/xunleii))\n\n## [v2.2.0](https://github.com/xunleii/terraform-module-k3s/tree/v2.2.0) (2021-01-03)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v2.1.0...v2.2.0)\n\n**Closed issues:**\n\n- kube\\_config output missing  [\\#41](https://github.com/xunleii/terraform-module-k3s/issues/41)\n- NodeNotFound when trying to update nodes [\\#31](https://github.com/xunleii/terraform-module-k3s/issues/31)\n\n**Merged pull requests:**\n\n- Try to fix this CI.... 
another time [\\#40](https://github.com/xunleii/terraform-module-k3s/pull/40) ([xunleii](https://github.com/xunleii))\n- Fix doc typo in readme [\\#39](https://github.com/xunleii/terraform-module-k3s/pull/39) ([DblK](https://github.com/DblK))\n\n## [v2.1.0](https://github.com/xunleii/terraform-module-k3s/tree/v2.1.0) (2020-08-26)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v2.0.1...v2.1.0)\n\n**Closed issues:**\n\n- Deprecation of network\\_id in `hcloud_server_network` [\\#29](https://github.com/xunleii/terraform-module-k3s/issues/29)\n- Remove or fix the 'latest' feature [\\#27](https://github.com/xunleii/terraform-module-k3s/issues/27)\n- Agent not update when k3s version changes [\\#24](https://github.com/xunleii/terraform-module-k3s/issues/24)\n- Need actions to test automatically PR [\\#5](https://github.com/xunleii/terraform-module-k3s/issues/5)\n\n**Merged pull requests:**\n\n- fix: repair Terraform workflow \\(CI\\) [\\#33](https://github.com/xunleii/terraform-module-k3s/pull/33) ([xunleii](https://github.com/xunleii))\n- Make sure the node is up before trying to use it. 
[\\#32](https://github.com/xunleii/terraform-module-k3s/pull/32) ([tedsteen](https://github.com/tedsteen))\n- fix: replace network\\_id with subnet\\_id [\\#30](https://github.com/xunleii/terraform-module-k3s/pull/30) ([solidnerd](https://github.com/solidnerd))\n- fix: use k3s update channels for latest releases instead of github [\\#28](https://github.com/xunleii/terraform-module-k3s/pull/28) ([solidnerd](https://github.com/solidnerd))\n\n## [v2.0.1](https://github.com/xunleii/terraform-module-k3s/tree/v2.0.1) (2020-05-31)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v2.0.0...v2.0.1)\n\n**Closed issues:**\n\n- CI needs to be fixed before v2 release [\\#22](https://github.com/xunleii/terraform-module-k3s/issues/22)\n\n**Merged pull requests:**\n\n- fix: do not uninstall k3s during upgrade [\\#25](https://github.com/xunleii/terraform-module-k3s/pull/25) ([xunleii](https://github.com/xunleii))\n\n## [v2.0.0](https://github.com/xunleii/terraform-module-k3s/tree/v2.0.0) (2020-05-31)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v1.7.0...v2.0.0)\n\n**Closed issues:**\n\n- Server taints flags are not used [\\#20](https://github.com/xunleii/terraform-module-k3s/issues/20)\n- Make it possible to have additional flags per agent [\\#18](https://github.com/xunleii/terraform-module-k3s/issues/18)\n\n**Merged pull requests:**\n\n- fix: update Github Actions worflow [\\#23](https://github.com/xunleii/terraform-module-k3s/pull/23) ([xunleii](https://github.com/xunleii))\n- feat: rewrote module [\\#21](https://github.com/xunleii/terraform-module-k3s/pull/21) ([xunleii](https://github.com/xunleii))\n- Additional flags per instance [\\#19](https://github.com/xunleii/terraform-module-k3s/pull/19) ([tedsteen](https://github.com/tedsteen))\n\n## [v1.7.0](https://github.com/xunleii/terraform-module-k3s/tree/v1.7.0) (2020-01-31)\n\n[Full 
Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v1.6.3...v1.7.0)\n\n**Merged pull requests:**\n\n- feat: add node taints & labels [\\#17](https://github.com/xunleii/terraform-module-k3s/pull/17) ([xunleii](https://github.com/xunleii))\n\n## [v1.6.3](https://github.com/xunleii/terraform-module-k3s/tree/v1.6.3) (2019-12-28)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v1.6.2...v1.6.3)\n\n**Merged pull requests:**\n\n- fix: use node\\_name field in node deletion [\\#16](https://github.com/xunleii/terraform-module-k3s/pull/16) ([xunleii](https://github.com/xunleii))\n\n## [v1.6.2](https://github.com/xunleii/terraform-module-k3s/tree/v1.6.2) (2019-12-21)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v1.6.1...v1.6.2)\n\n**Merged pull requests:**\n\n- feat: use name in agent nodes [\\#15](https://github.com/xunleii/terraform-module-k3s/pull/15) ([xunleii](https://github.com/xunleii))\n\n## [v1.6.1](https://github.com/xunleii/terraform-module-k3s/tree/v1.6.1) (2019-12-04)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v1.6.0...v1.6.1)\n\n**Merged pull requests:**\n\n- feat: upload installer [\\#14](https://github.com/xunleii/terraform-module-k3s/pull/14) ([xunleii](https://github.com/xunleii))\n\n## [v1.6.0](https://github.com/xunleii/terraform-module-k3s/tree/v1.6.0) (2019-12-04)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v1.5.0...v1.6.0)\n\n**Merged pull requests:**\n\n- refact: rename node roles in server and agent [\\#13](https://github.com/xunleii/terraform-module-k3s/pull/13) ([xunleii](https://github.com/xunleii))\n- Refact clean module [\\#12](https://github.com/xunleii/terraform-module-k3s/pull/12) ([xunleii](https://github.com/xunleii))\n\n## [v1.5.0](https://github.com/xunleii/terraform-module-k3s/tree/v1.5.0) (2019-12-01)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v1.4.0...v1.5.0)\n\n## 
[v1.4.0](https://github.com/xunleii/terraform-module-k3s/tree/v1.4.0) (2019-11-27)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v1.3.2...v1.4.0)\n\n**Merged pull requests:**\n\n- refact: clean custom flags feature [\\#11](https://github.com/xunleii/terraform-module-k3s/pull/11) ([xunleii](https://github.com/xunleii))\n\n## [v1.3.2](https://github.com/xunleii/terraform-module-k3s/tree/v1.3.2) (2019-11-27)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v1.3.1...v1.3.2)\n\n**Merged pull requests:**\n\n- fix: join custom arguments [\\#10](https://github.com/xunleii/terraform-module-k3s/pull/10) ([xunleii](https://github.com/xunleii))\n\n## [v1.3.1](https://github.com/xunleii/terraform-module-k3s/tree/v1.3.1) (2019-11-27)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v1.2.3...v1.3.1)\n\n**Merged pull requests:**\n\n- feat: add custom arguments [\\#9](https://github.com/xunleii/terraform-module-k3s/pull/9) ([xunleii](https://github.com/xunleii))\n\n## [v1.2.3](https://github.com/xunleii/terraform-module-k3s/tree/v1.2.3) (2019-11-24)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v1.2.2...v1.2.3)\n\n**Merged pull requests:**\n\n- fix: remove warning 'quoted keywords are now deprecated' [\\#8](https://github.com/xunleii/terraform-module-k3s/pull/8) ([xunleii](https://github.com/xunleii))\n\n## [v1.2.2](https://github.com/xunleii/terraform-module-k3s/tree/v1.2.2) (2019-11-16)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v1.2.1...v1.2.2)\n\n**Merged pull requests:**\n\n- feat: add Terraform actions [\\#6](https://github.com/xunleii/terraform-module-k3s/pull/6) ([xunleii](https://github.com/xunleii))\n\n## [v1.2.1](https://github.com/xunleii/terraform-module-k3s/tree/v1.2.1) (2019-11-16)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v1.2.0...v1.2.1)\n\n## 
[v1.2.0](https://github.com/xunleii/terraform-module-k3s/tree/v1.2.0) (2019-11-16)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v1.1.0...v1.2.0)\n\n**Closed issues:**\n\n- Remove 'scp' dependency [\\#3](https://github.com/xunleii/terraform-module-k3s/issues/3)\n\n**Merged pull requests:**\n\n- Remove 'scp' dependency [\\#4](https://github.com/xunleii/terraform-module-k3s/pull/4) ([xunleii](https://github.com/xunleii))\n\n## [v1.1.0](https://github.com/xunleii/terraform-module-k3s/tree/v1.1.0) (2019-11-03)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/v1.0.0...v1.1.0)\n\n**Closed issues:**\n\n- Impossible to remove one \\(several\\) minion node\\(s\\) [\\#1](https://github.com/xunleii/terraform-module-k3s/issues/1)\n\n**Merged pull requests:**\n\n- \\#1 - fix removable node [\\#2](https://github.com/xunleii/terraform-module-k3s/pull/2) ([xunleii](https://github.com/xunleii))\n\n## [v1.0.0](https://github.com/xunleii/terraform-module-k3s/tree/v1.0.0) (2019-11-02)\n\n[Full Changelog](https://github.com/xunleii/terraform-module-k3s/compare/ccc49fe3f98ef7a9885dcf5ae3efb087048497f9...v1.0.0)\n\n\n\n\\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)*\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2019 Alexandre NICOLAIE\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "# terraform-module-k3s\n![Terraform Version](https://img.shields.io/badge/terraform-≈_1.0-blueviolet)\n[![GitHub tag (latest SemVer)](https://img.shields.io/github/v/tag/xunleii/terraform-module-k3s?label=registry)](https://registry.terraform.io/modules/xunleii/k3s)\n[![GitHub issues](https://img.shields.io/github/issues/xunleii/terraform-module-k3s)](https://github.com/xunleii/terraform-module-k3s/issues)\n[![Open Source Helpers](https://www.codetriage.com/xunleii/terraform-module-k3s/badges/users.svg)](https://www.codetriage.com/xunleii/terraform-module-k3s)\n[![MIT Licensed](https://img.shields.io/badge/license-MIT-green.svg)](https://tldrlegal.com/license/mit-license)\n\nTerraform module to create a [k3s](https://k3s.io/) cluster with multi-server and annotations/labels/taints management features.\n\n\n## :warning: Security disclosure\n\nBecause the use of external references on the `destroy` provisioner is deprecated by Terraform, storing information inside each resource is mandatory in order to manage several functionalities such as automatic node draining and field management. As a result, several fields such as the `connection` block will be available in your TF state.\nThis means that the password or private key used will be **clearly readable** in this TF state.  
\n**Please be very careful to store your TF state securely if you use a private key or password in the `connection` block.**\n\n<!-- BEGIN_TF_DOCS -->\n## Example _(based on [Hetzner Cloud example](examples/hcloud-k3s))_\n\n```hcl\nmodule \"k3s\" {\n  source = \"xunleii/k3s/module\"\n\n  depends_on_    = hcloud_server.agents\n  k3s_version    = \"latest\"\n  cluster_domain = \"cluster.local\"\n  cidr = {\n    pods     = \"10.42.0.0/16\"\n    services = \"10.43.0.0/16\"\n  }\n  drain_timeout  = \"30s\"\n  managed_fields = [\"label\", \"taint\"] // ignore annotations\n\n  global_flags = [\n    \"--flannel-iface ens10\",\n    \"--kubelet-arg cloud-provider=external\" // required to use https://github.com/hetznercloud/hcloud-cloud-controller-manager\n  ]\n\n  servers = {\n    for i in range(length(hcloud_server.control_planes)) :\n    hcloud_server.control_planes[i].name => {\n      ip = hcloud_server_network.control_planes[i].ip\n      connection = {\n        host        = hcloud_server.control_planes[i].ipv4_address\n        private_key = trimspace(tls_private_key.ed25519_provisioning.private_key_pem)\n      }\n      flags = [\n        \"--disable-cloud-controller\",\n        \"--tls-san ${hcloud_server.control_planes[0].ipv4_address}\",\n      ]\n      annotations = { \"server_id\" : i } // these annotations will not be managed by this module\n    }\n  }\n\n  agents = {\n    for i in range(length(hcloud_server.agents)) :\n    \"${hcloud_server.agents[i].name}_node\" => {\n      name = hcloud_server.agents[i].name\n      ip   = hcloud_server_network.agents_network[i].ip\n      connection = {\n        host        = hcloud_server.agents[i].ipv4_address\n        private_key = trimspace(tls_private_key.ed25519_provisioning.private_key_pem)\n      }\n\n      labels = { \"node.kubernetes.io/pool\" = hcloud_server.agents[i].labels.nodepool }\n      taints = { \"dedicated\" : hcloud_server.agents[i].labels.nodepool == \"gpu\" ? 
\"gpu:NoSchedule\" : null }\n    }\n  }\n}\n```\n\n## Inputs\n\n| Name | Description | Type | Default | Required |\n|------|-------------|------|---------|:--------:|\n| <a name=\"input_servers\"></a> [servers](#input\\_servers) | K3s server nodes definition. The key is used as node name if no name is provided. | `map(any)` | n/a | yes |\n| <a name=\"input_agents\"></a> [agents](#input\\_agents) | K3s agent nodes definitions. The key is used as node name if no name is provided. | `map(any)` | `{}` | no |\n| <a name=\"input_cidr\"></a> [cidr](#input\\_cidr) | K3s network CIDRs (see https://rancher.com/docs/k3s/latest/en/installation/install-options/). | <pre>object({<br>    pods     = string<br>    services = string<br>  })</pre> | <pre>{<br>  \"pods\": \"10.42.0.0/16\",<br>  \"services\": \"10.43.0.0/16\"<br>}</pre> | no |\n| <a name=\"input_cluster_domain\"></a> [cluster\\_domain](#input\\_cluster\\_domain) | K3s cluster domain name (see https://rancher.com/docs/k3s/latest/en/installation/install-options/). | `string` | `\"cluster.local\"` | no |\n| <a name=\"input_depends_on_\"></a> [depends\\_on\\_](#input\\_depends\\_on\\_) | Resource dependency of this module. | `any` | `null` | no |\n| <a name=\"input_drain_timeout\"></a> [drain\\_timeout](#input\\_drain\\_timeout) | The length of time to wait before giving up the node draining. Infinite by default. | `string` | `\"0s\"` | no |\n| <a name=\"input_generate_ca_certificates\"></a> [generate\\_ca\\_certificates](#input\\_generate\\_ca\\_certificates) | If true, this module will generate the CA certificates (see https://github.com/rancher/k3s/issues/1868#issuecomment-639690634). Otherwise rancher will generate it. This is required to generate kubeconfig | `bool` | `true` | no |\n| <a name=\"input_global_flags\"></a> [global\\_flags](#input\\_global\\_flags) | Add additional installation flags, used by all nodes (see https://rancher.com/docs/k3s/latest/en/installation/install-options/). 
| `list(string)` | `[]` | no |\n| <a name=\"input_k3s_install_env_vars\"></a> [k3s\\_install\\_env\\_vars](#input\\_k3s\\_install\\_env\\_vars) | map of environment variables that are passed to the k3s installation script (see https://docs.k3s.io/reference/env-variables) | `map(string)` | `{}` | no |\n| <a name=\"input_k3s_version\"></a> [k3s\\_version](#input\\_k3s\\_version) | Specify the k3s version. You can choose from the following release channels or pin the version directly | `string` | `\"latest\"` | no |\n| <a name=\"input_kubernetes_certificates\"></a> [kubernetes\\_certificates](#input\\_kubernetes\\_certificates) | A list of maps of certificate-name.[crt/key] : certificate-value to be copied to /var/lib/rancher/k3s/server/tls, if this option is used generate\\_ca\\_certificates will be treated as false | <pre>list(<br>    object({<br>      file_name    = string,<br>      file_content = string<br>    })<br>  )</pre> | `[]` | no |\n| <a name=\"input_managed_fields\"></a> [managed\\_fields](#input\\_managed\\_fields) | List of fields which must be managed by this module (can be annotation, label and/or taint). | `list(string)` | <pre>[<br>  \"annotation\",<br>  \"label\",<br>  \"taint\"<br>]</pre> | no |\n| <a name=\"input_name\"></a> [name](#input\\_name) | K3s cluster domain name (see https://rancher.com/docs/k3s/latest/en/installation/install-options/). This input is deprecated and will be removed in the next major release. Use `cluster_domain` instead. | `string` | `\"!!!DEPRECATED!!!\"` | no |\n| <a name=\"input_separator\"></a> [separator](#input\\_separator) | Separator used to separate node name and field name (used to manage annotations, labels and taints). | `string` | `\"\\|\"` | no |\n| <a name=\"input_use_sudo\"></a> [use\\_sudo](#input\\_use\\_sudo) | Whether or not to use kubectl with sudo during cluster setup. 
| `bool` | `false` | no |\n\n## Outputs\n\n| Name | Description |\n|------|-------------|\n| <a name=\"output_kube_config\"></a> [kube\\_config](#output\\_kube\\_config) | Genereated kubeconfig. |\n| <a name=\"output_kubernetes\"></a> [kubernetes](#output\\_kubernetes) | Authentication credentials of Kubernetes (full administrator). |\n| <a name=\"output_kubernetes_cluster_secret\"></a> [kubernetes\\_cluster\\_secret](#output\\_kubernetes\\_cluster\\_secret) | Secret token used to join nodes to the cluster |\n| <a name=\"output_kubernetes_ready\"></a> [kubernetes\\_ready](#output\\_kubernetes\\_ready) | Dependency endpoint to synchronize k3s installation and provisioning. |\n| <a name=\"output_summary\"></a> [summary](#output\\_summary) | Current state of k3s (version & nodes). |\n\n## Providers\n\n| Name | Version |\n|------|---------|\n| <a name=\"provider_http\"></a> [http](#provider\\_http) | ~> 3.0 |\n| <a name=\"provider_null\"></a> [null](#provider\\_null) | ~> 3.0 |\n| <a name=\"provider_random\"></a> [random](#provider\\_random) | ~> 3.0 |\n| <a name=\"provider_tls\"></a> [tls](#provider\\_tls) | ~> 4.0 |\n<!-- END_TF_DOCS -->\n\n## Frequently Asked Questions\n\n### How to customise the generated `kubeconfig`\n\nIt is sometimes necessary to modify the context or the cluster name to adapt `kubeconfig` to a third-party tool or to avoid conflicts with existing tools. 
Although this is not the role of this module, it can easily be done with its outputs :\n\n```hcl\nmodule \"k3s\" {\n  ...\n}\n\nlocal {\n  kubeconfig = yamlencode({\n    apiVersion      = \"v1\"\n    kind            = \"Config\"\n    current-context = \"my-context-name\"\n    contexts = [{\n      context = {\n        cluster = \"my-cluster-name\"\n        user : \"my-user-name\"\n      }\n      name = \"my-context-name\"\n    }]\n    clusters = [{\n      cluster = {\n        certificate-authority-data = base64encode(module.k3s.kubernetes.cluster_ca_certificate)\n        server                     = module.k3s.kubernetes.api_endpoint\n      }\n      name = \"my-cluster-name\"\n    }]\n    users = [{\n      user = {\n        client-certificate-data : base64encode(module.k3s.kubernetes.client_certificate)\n        client-key-data : base64encode(module.k3s.kubernetes.client_key)\n      }\n      name : \"my-user-name\"\n    }]\n  })\n}\n```\n\n## License\n`terraform-module-k3s` is released under the **MIT License**. See the bundled [LICENSE](LICENSE) file for details.\n\n#\n*Generated with :heart: by [terraform-docs](https://github.com/terraform-docs/terraform-docs)*\n"
  },
  {
    "path": "Taskfile.yaml",
    "content": "# yaml-language-server: $schema=https://taskfile.dev/schema.json\nversion: \"3\"\n\ntasks:\n  default: { cmds: [task --list], silent: true }\n\n  dev:lint:\n    aliases: [lint]\n    cmds:\n      - terraform fmt -recursive\n    desc: Lint terraform code\n\n  examples:hcloud:setup:\n    aliases: [test, dev:test]\n    cmds:\n      - terraform init\n      - terraform validate\n      - terraform apply -auto-approve\n      - terraform output -raw kubeconfig > kubeconfig~\n    desc: Test this terraform module on Hetzner Cloud\n    dir: examples/hcloud-k3s\n    generates:\n      - kubeconfig~\n    interactive: true\n    requires:\n      vars: [HCLOUD_TOKEN]\n    sources:\n      # - \"../../*.tf\"\n      - \"*.tf\"\n\n  examples:hcloud:teardown:\n    cmds:\n      - terraform destroy -auto-approve\n      - rm -f kubeconfig~\n    desc: Remove all resources created by test:hcloud:setup\n    dir: examples/hcloud-k3s\n    interactive: true\n    preconditions:\n      - sh: test -f kubeconfig~\n        msg: Run `test:hcloud:setup` first\n    prompt: Are you sure you want to destroy all resources created by `test:hcloud:setup`?\n    requires:\n      vars: [HCLOUD_TOKEN]\n\n  e2e:hcloud:\n    aliases: [e2e]\n    cmds:\n      - task: examples:hcloud:setup\n      - defer: task examples:hcloud:teardown\n      - kubectl --kubeconfig examples/hcloud-k3s/kubeconfig~ get nodes\n    desc: Run e2e tests on Hetzner Cloud\n"
  },
  {
    "path": "agent_nodes.tf",
    "content": "locals {\n  // Generate a map of all agents annotations in order to manage them through this module. This\n  // generation is made in two steps:\n  // - generate a list of objects representing all annotations, following this\n  //   'template' {key = node_name|annotation_name, value = annotation_value}\n  // - generate a map based on the generated list (using the field key as map key)\n  agent_annotations_list = flatten([\n    for nk, nv in var.agents : [\n      // Because we need node name and annotation name when we remove the annotation resource, we need\n      // to share them through the annotation key (each.value are not avaible on destruction).\n      for ak, av in try(nv.annotations, {}) : av == null ? { key : \"\" } : { key : \"${nk}${var.separator}${ak}\", value : av }\n    ]\n  ])\n  agent_annotations = local.managed_annotation_enabled ? { for o in local.agent_annotations_list : o.key => o.value if o.key != \"\" } : {}\n\n  // Generate a map of all agents labels in order to manage them through this module. This\n  // generation is made in two steps, following the same process than annotation's map.\n  agent_labels_list = flatten([\n    for nk, nv in var.agents : [\n      // Because we need node name and label name when we remove the label resource, we need\n      // to share them through the label key (each.value are not avaible on destruction).\n      for lk, lv in try(nv.labels, {}) : lv == null ? { key : \"\" } : { key : \"${nk}${var.separator}${lk}\", value : lv }\n    ]\n  ])\n  agent_labels = local.managed_label_enabled ? { for o in local.agent_labels_list : o.key => o.value if o.key != \"\" } : {}\n\n  // Generate a map of all agents taints in order to manage them through this module. 
This\n  // generation is made in two steps, following the same process than annotation's map.\n  agent_taints_list = flatten([\n    for nk, nv in var.agents : [\n      // Because we need node name and taint name when we remove the taint resource, we need\n      // to share them through the taint key (each.value are not avaible on destruction).\n      for tk, tv in try(nv.taints, {}) : tv == null ? { key : \"\" } : { key : \"${nk}${var.separator}${tk}\", value : tv }\n    ]\n  ])\n  agent_taints = local.managed_taint_enabled ? { for o in local.agent_taints_list : o.key => o.value if o.key != \"\" } : {}\n\n  // Generate a map of all calculated agent fields, used during k3s installation.\n  agents_metadata = {\n    for key, agent in var.agents :\n    key => {\n      name = try(agent.name, key)\n      ip   = agent.ip\n\n      flags = join(\" \", compact(concat(\n        [\n          \"--node-ip ${agent.ip}\",\n          \"--node-name '${try(agent.name, key)}'\",\n          \"--server https://${local.root_advertise_ip_k3s}:6443\",\n          \"--token ${nonsensitive(random_password.k3s_cluster_secret.result)}\", # NOTE: nonsensitive is used to show logs during provisioning\n        ],\n        var.global_flags,\n        try(agent.flags, []),\n        [for key, value in try(agent.taints, {}) : \"--node-taint '${key}=${value}'\" if value != null]\n      )))\n\n      immutable_fields_hash = sha1(join(\"\", concat(\n        [var.cluster_domain],\n        var.global_flags,\n        try(agent.flags, []),\n      )))\n    }\n  }\n  kubectl_cmd = var.use_sudo ? 
\"sudo kubectl\" : \"kubectl\"\n}\n\n// Install k3s agent\nresource \"null_resource\" \"agents_install\" {\n  for_each = var.agents\n\n  depends_on = [null_resource.servers_install]\n  triggers = {\n    on_immutable_changes = local.agents_metadata[each.key].immutable_fields_hash\n    on_new_version       = local.k3s_version\n  }\n\n  connection {\n    type = try(each.value.connection.type, \"ssh\")\n\n    host     = try(each.value.connection.host, each.value.ip)\n    user     = try(each.value.connection.user, null)\n    password = try(each.value.connection.password, null)\n    port     = try(each.value.connection.port, null)\n    timeout  = try(each.value.connection.timeout, null)\n\n    script_path    = try(each.value.connection.script_path, null)\n    private_key    = try(each.value.connection.private_key, null)\n    certificate    = try(each.value.connection.certificate, null)\n    agent          = try(each.value.connection.agent, null)\n    agent_identity = try(each.value.connection.agent_identity, null)\n    host_key       = try(each.value.connection.host_key, null)\n\n    https    = try(each.value.connection.https, null)\n    insecure = try(each.value.connection.insecure, null)\n    use_ntlm = try(each.value.connection.use_ntlm, null)\n    cacert   = try(each.value.connection.cacert, null)\n\n    bastion_host        = try(each.value.connection.bastion_host, null)\n    bastion_host_key    = try(each.value.connection.bastion_host_key, null)\n    bastion_port        = try(each.value.connection.bastion_port, null)\n    bastion_user        = try(each.value.connection.bastion_user, null)\n    bastion_password    = try(each.value.connection.bastion_password, null)\n    bastion_private_key = try(each.value.connection.bastion_private_key, null)\n    bastion_certificate = try(each.value.connection.bastion_certificate, null)\n  }\n\n  // Upload k3s install script\n  provisioner \"file\" {\n    content     = data.http.k3s_installer.response_body\n    destination = 
\"/tmp/k3s-installer\"\n  }\n\n  // Install k3s\n  provisioner \"remote-exec\" {\n    inline = [\n      \"${local.install_env_vars} INSTALL_K3S_VERSION=${local.k3s_version} sh /tmp/k3s-installer agent ${local.agents_metadata[each.key].flags}\",\n      \"until systemctl is-active --quiet k3s-agent.service; do sleep 1; done\"\n    ]\n  }\n}\n\n// Drain k3s node on destruction in order to safely move all workflows to another node.\nresource \"null_resource\" \"agents_drain\" {\n  for_each = var.agents\n\n  depends_on = [null_resource.agents_install]\n  triggers = {\n    // Because some fields must be used on destruction, we need to store them into the current\n    // object. The only way to do that is to use triggers to store theses fields.\n    agent_name      = local.agents_metadata[split(var.separator, each.key)[0]].name\n    connection_json = base64encode(jsonencode(local.root_server_connection))\n    drain_timeout   = var.drain_timeout\n    kubectl_cmd     = local.kubectl_cmd\n  }\n  // Because we use triggers as memory area, we need to ignore all changes on it.\n  lifecycle { ignore_changes = [triggers] }\n\n  connection {\n    type = jsondecode(base64decode(self.triggers.connection_json)).type\n\n    host     = jsondecode(base64decode(self.triggers.connection_json)).host\n    user     = jsondecode(base64decode(self.triggers.connection_json)).user\n    password = jsondecode(base64decode(self.triggers.connection_json)).password\n    port     = jsondecode(base64decode(self.triggers.connection_json)).port\n    timeout  = jsondecode(base64decode(self.triggers.connection_json)).timeout\n\n    script_path    = jsondecode(base64decode(self.triggers.connection_json)).script_path\n    private_key    = jsondecode(base64decode(self.triggers.connection_json)).private_key\n    certificate    = jsondecode(base64decode(self.triggers.connection_json)).certificate\n    agent          = jsondecode(base64decode(self.triggers.connection_json)).agent\n    agent_identity = 
jsondecode(base64decode(self.triggers.connection_json)).agent_identity\n    host_key       = jsondecode(base64decode(self.triggers.connection_json)).host_key\n\n    https    = jsondecode(base64decode(self.triggers.connection_json)).https\n    insecure = jsondecode(base64decode(self.triggers.connection_json)).insecure\n    use_ntlm = jsondecode(base64decode(self.triggers.connection_json)).use_ntlm\n    cacert   = jsondecode(base64decode(self.triggers.connection_json)).cacert\n\n    bastion_host        = jsondecode(base64decode(self.triggers.connection_json)).bastion_host\n    bastion_host_key    = jsondecode(base64decode(self.triggers.connection_json)).bastion_host_key\n    bastion_port        = jsondecode(base64decode(self.triggers.connection_json)).bastion_port\n    bastion_user        = jsondecode(base64decode(self.triggers.connection_json)).bastion_user\n    bastion_password    = jsondecode(base64decode(self.triggers.connection_json)).bastion_password\n    bastion_private_key = jsondecode(base64decode(self.triggers.connection_json)).bastion_private_key\n    bastion_certificate = jsondecode(base64decode(self.triggers.connection_json)).bastion_certificate\n  }\n\n  provisioner \"remote-exec\" {\n    when = destroy\n    inline = [\n      \"${self.triggers.kubectl_cmd} drain ${self.triggers.agent_name} --delete-local-data --force --ignore-daemonsets --timeout=${self.triggers.drain_timeout}\"\n    ]\n  }\n}\n\n// Add/remove manually annotation on k3s agent\nresource \"null_resource\" \"agents_annotation\" {\n  for_each = local.agent_annotations\n\n  depends_on = [null_resource.agents_install]\n  triggers = {\n    agent_name       = local.agents_metadata[split(var.separator, each.key)[0]].name\n    annotation_name  = split(var.separator, each.key)[1]\n    on_value_changes = each.value\n\n    // Because some fields must be used on destruction, we need to store them into the current\n    // object. 
The only way to do that is to use triggers to store theses fields.\n    connection_json = base64encode(jsonencode(local.root_server_connection))\n    kubectl_cmd     = local.kubectl_cmd\n  }\n  // Because we dont care about connection modification, we ignore its changes.\n  lifecycle { ignore_changes = [triggers[\"connection_json\"], triggers[\"kubectl_cmd\"]] }\n\n  connection {\n    type = jsondecode(base64decode(self.triggers.connection_json)).type\n\n    host     = jsondecode(base64decode(self.triggers.connection_json)).host\n    user     = jsondecode(base64decode(self.triggers.connection_json)).user\n    password = jsondecode(base64decode(self.triggers.connection_json)).password\n    port     = jsondecode(base64decode(self.triggers.connection_json)).port\n    timeout  = jsondecode(base64decode(self.triggers.connection_json)).timeout\n\n    script_path    = jsondecode(base64decode(self.triggers.connection_json)).script_path\n    private_key    = jsondecode(base64decode(self.triggers.connection_json)).private_key\n    certificate    = jsondecode(base64decode(self.triggers.connection_json)).certificate\n    agent          = jsondecode(base64decode(self.triggers.connection_json)).agent\n    agent_identity = jsondecode(base64decode(self.triggers.connection_json)).agent_identity\n    host_key       = jsondecode(base64decode(self.triggers.connection_json)).host_key\n\n    https    = jsondecode(base64decode(self.triggers.connection_json)).https\n    insecure = jsondecode(base64decode(self.triggers.connection_json)).insecure\n    use_ntlm = jsondecode(base64decode(self.triggers.connection_json)).use_ntlm\n    cacert   = jsondecode(base64decode(self.triggers.connection_json)).cacert\n\n    bastion_host        = jsondecode(base64decode(self.triggers.connection_json)).bastion_host\n    bastion_host_key    = jsondecode(base64decode(self.triggers.connection_json)).bastion_host_key\n    bastion_port        = 
jsondecode(base64decode(self.triggers.connection_json)).bastion_port\n    bastion_user        = jsondecode(base64decode(self.triggers.connection_json)).bastion_user\n    bastion_password    = jsondecode(base64decode(self.triggers.connection_json)).bastion_password\n    bastion_private_key = jsondecode(base64decode(self.triggers.connection_json)).bastion_private_key\n    bastion_certificate = jsondecode(base64decode(self.triggers.connection_json)).bastion_certificate\n  }\n\n  provisioner \"remote-exec\" {\n    inline = [\n      \"until kubectl get node ${self.triggers.agent_name}; do sleep 1; done\",\n      \"${self.triggers.kubectl_cmd} annotate --overwrite node ${self.triggers.agent_name} ${self.triggers.annotation_name}=${self.triggers.on_value_changes}\"\n    ]\n  }\n\n  provisioner \"remote-exec\" {\n    when = destroy\n    inline = [\n      \"${self.triggers.kubectl_cmd} annotate node ${self.triggers.agent_name} ${self.triggers.annotation_name}-\"\n    ]\n  }\n}\n\n// Add/remove manually label on k3s agent\nresource \"null_resource\" \"agents_label\" {\n  for_each = local.agent_labels\n\n  depends_on = [null_resource.agents_install]\n  triggers = {\n    agent_name       = local.agents_metadata[split(var.separator, each.key)[0]].name\n    label_name       = split(var.separator, each.key)[1]\n    on_value_changes = each.value\n\n    // Because some fields must be used on destruction, we need to store them into the current\n    // object. 
The only way to do that is to use triggers to store theses fields.\n    connection_json = base64encode(jsonencode(local.root_server_connection))\n    kubectl_cmd     = local.kubectl_cmd\n  }\n  // Because we dont care about connection modification, we ignore its changes.\n  lifecycle { ignore_changes = [triggers[\"connection_json\"], triggers[\"kubectl_cmd\"]] }\n\n  connection {\n    type = jsondecode(base64decode(self.triggers.connection_json)).type\n\n    host     = jsondecode(base64decode(self.triggers.connection_json)).host\n    user     = jsondecode(base64decode(self.triggers.connection_json)).user\n    password = jsondecode(base64decode(self.triggers.connection_json)).password\n    port     = jsondecode(base64decode(self.triggers.connection_json)).port\n    timeout  = jsondecode(base64decode(self.triggers.connection_json)).timeout\n\n    script_path    = jsondecode(base64decode(self.triggers.connection_json)).script_path\n    private_key    = jsondecode(base64decode(self.triggers.connection_json)).private_key\n    certificate    = jsondecode(base64decode(self.triggers.connection_json)).certificate\n    agent          = jsondecode(base64decode(self.triggers.connection_json)).agent\n    agent_identity = jsondecode(base64decode(self.triggers.connection_json)).agent_identity\n    host_key       = jsondecode(base64decode(self.triggers.connection_json)).host_key\n\n    https    = jsondecode(base64decode(self.triggers.connection_json)).https\n    insecure = jsondecode(base64decode(self.triggers.connection_json)).insecure\n    use_ntlm = jsondecode(base64decode(self.triggers.connection_json)).use_ntlm\n    cacert   = jsondecode(base64decode(self.triggers.connection_json)).cacert\n\n    bastion_host        = jsondecode(base64decode(self.triggers.connection_json)).bastion_host\n    bastion_host_key    = jsondecode(base64decode(self.triggers.connection_json)).bastion_host_key\n    bastion_port        = 
jsondecode(base64decode(self.triggers.connection_json)).bastion_port\n    bastion_user        = jsondecode(base64decode(self.triggers.connection_json)).bastion_user\n    bastion_password    = jsondecode(base64decode(self.triggers.connection_json)).bastion_password\n    bastion_private_key = jsondecode(base64decode(self.triggers.connection_json)).bastion_private_key\n    bastion_certificate = jsondecode(base64decode(self.triggers.connection_json)).bastion_certificate\n  }\n\n  provisioner \"remote-exec\" {\n    inline = [\n      \"until ${self.triggers.kubectl_cmd} get node ${self.triggers.agent_name}; do sleep 1; done\",\n      \"${self.triggers.kubectl_cmd} label --overwrite node ${self.triggers.agent_name} ${self.triggers.label_name}=${self.triggers.on_value_changes}\"\n    ]\n  }\n\n  provisioner \"remote-exec\" {\n    when = destroy\n    inline = [\n      \"${self.triggers.kubectl_cmd} label node ${self.triggers.agent_name} ${self.triggers.label_name}-\"\n    ]\n  }\n}\n\n// Add manually taint on k3s agent\nresource \"null_resource\" \"agents_taint\" {\n  for_each = local.agent_taints\n\n  depends_on = [null_resource.agents_install]\n  triggers = {\n    agent_name       = local.agents_metadata[split(var.separator, each.key)[0]].name\n    taint_name       = split(var.separator, each.key)[1]\n    on_value_changes = each.value\n\n    // Because some fields must be used on destruction, we need to store them into the current\n    // object. 
The only way to do that is to use triggers to store theses fields.\n    connection_json = base64encode(jsonencode(local.root_server_connection))\n    kubectl_cmd     = local.kubectl_cmd\n  }\n  // Because we dont care about connection modification, we ignore its changes.\n  lifecycle { ignore_changes = [triggers[\"connection_json\"], triggers[\"kubectl_cmd\"]] }\n\n  connection {\n    type = jsondecode(base64decode(self.triggers.connection_json)).type\n\n    host     = jsondecode(base64decode(self.triggers.connection_json)).host\n    user     = jsondecode(base64decode(self.triggers.connection_json)).user\n    password = jsondecode(base64decode(self.triggers.connection_json)).password\n    port     = jsondecode(base64decode(self.triggers.connection_json)).port\n    timeout  = jsondecode(base64decode(self.triggers.connection_json)).timeout\n\n    script_path    = jsondecode(base64decode(self.triggers.connection_json)).script_path\n    private_key    = jsondecode(base64decode(self.triggers.connection_json)).private_key\n    certificate    = jsondecode(base64decode(self.triggers.connection_json)).certificate\n    agent          = jsondecode(base64decode(self.triggers.connection_json)).agent\n    agent_identity = jsondecode(base64decode(self.triggers.connection_json)).agent_identity\n    host_key       = jsondecode(base64decode(self.triggers.connection_json)).host_key\n\n    https    = jsondecode(base64decode(self.triggers.connection_json)).https\n    insecure = jsondecode(base64decode(self.triggers.connection_json)).insecure\n    use_ntlm = jsondecode(base64decode(self.triggers.connection_json)).use_ntlm\n    cacert   = jsondecode(base64decode(self.triggers.connection_json)).cacert\n\n    bastion_host        = jsondecode(base64decode(self.triggers.connection_json)).bastion_host\n    bastion_host_key    = jsondecode(base64decode(self.triggers.connection_json)).bastion_host_key\n    bastion_port        = 
jsondecode(base64decode(self.triggers.connection_json)).bastion_port\n    bastion_user        = jsondecode(base64decode(self.triggers.connection_json)).bastion_user\n    bastion_password    = jsondecode(base64decode(self.triggers.connection_json)).bastion_password\n    bastion_private_key = jsondecode(base64decode(self.triggers.connection_json)).bastion_private_key\n    bastion_certificate = jsondecode(base64decode(self.triggers.connection_json)).bastion_certificate\n  }\n\n  provisioner \"remote-exec\" {\n    inline = [\n      \"until ${self.triggers.kubectl_cmd} get node ${self.triggers.agent_name}; do sleep 1; done\",\n      \"${self.triggers.kubectl_cmd} taint node ${self.triggers.agent_name} ${self.triggers.taint_name}=${self.triggers.on_value_changes} --overwrite\"\n    ]\n  }\n\n  provisioner \"remote-exec\" {\n    when = destroy\n    inline = [\n      \"${self.triggers.kubectl_cmd} taint node ${self.triggers.agent_name} ${self.triggers.taint_name}-\"\n    ]\n  }\n}\n"
  },
  {
    "path": "examples/civo-k3s/README.md",
    "content": "#  K3S example for Civo\n\nConfiguration in this directory creates a k3s cluster resources instances.\n\n## Usage\n\n> [!warning]\n> **Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.**\n\n```bash\n$ export CIVO_TOKEN=...\n$ terraform init\n$ terraform apply\n```\n"
  },
  {
    "path": "examples/civo-k3s/k3s.tf",
    "content": "module \"k3s\" {\n  source = \"./../..\"\n\n  depends_on_    = civo_instance.node_instances\n  k3s_version    = \"latest\"\n  cluster_domain = \"civo_k3s\"\n\n  drain_timeout            = \"60s\"\n  managed_fields           = [\"label\"]\n  generate_ca_certificates = true\n\n  global_flags = [for instance in civo_instance.node_instances : \"--tls-san ${instance.public_ip}\"]\n\n  servers = {\n    # The node name will be automatically provided by\n    # the module using the field name... any usage of\n    # --node-name in additional_flags will be ignored\n\n    for instance in civo_instance.node_instances :\n    instance.hostname => {\n      ip = instance.private_ip\n      connection = {\n        timeout  = \"60s\"\n        type     = \"ssh\"\n        host     = instance.public_ip\n        password = instance.initial_password\n        user     = \"root\"\n      }\n\n      labels = { \"node.kubernetes.io/type\" = \"master\" }\n    }\n  }\n}\n"
  },
  {
    "path": "examples/civo-k3s/main.tf",
    "content": "data \"civo_disk_image\" \"ubuntu\" {\n  filter {\n    key      = \"name\"\n    values   = [\"ubuntu\"]\n    match_by = \"re\"\n  }\n\n  sort {\n    key       = \"version\"\n    direction = \"desc\"\n  }\n}\n\ndata \"civo_instances_size\" \"node_size\" {\n  filter {\n    key    = \"name\"\n    values = [\"g3.small\"]\n  }\n}\n\nresource \"civo_instance\" \"node_instances\" {\n  count      = 3\n  hostname   = \"node-${count.index + 1}\"\n  size       = data.civo_instances_size.node_size.sizes[0].name\n  disk_image = data.civo_disk_image.ubuntu[count.index].id\n}\n"
  },
  {
    "path": "examples/civo-k3s/outputs.tf",
    "content": "output \"summary\" {\n  value = module.k3s.summary\n}\n\noutput \"kubeconfig\" {\n  value     = module.k3s.kube_config\n  sensitive = true\n}\n"
  },
  {
    "path": "examples/civo-k3s/versions.tf",
    "content": "terraform {\n  required_providers {\n    civo = {\n      source  = \"civo/civo\"\n      version = \"~>0.10.10\"\n    }\n  }\n  required_version = \"~> 1.0\"\n}\n"
  },
  {
    "path": "examples/do-k3s/README.md",
    "content": "#  K3S example for Digital Ocean\n\nConfiguration in this directory creates a k3s cluster resources instances.\n\n## Usage\n\n> [!warning]\n> **Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.**\n\n```bash\n$ export DIGITALOCEAN_TOKEN=...\n$ terraform init\n$ terraform apply\n```\n"
  },
  {
    "path": "examples/do-k3s/k3s.tf",
    "content": "module \"k3s\" {\n  source = \"./../..\"\n\n  depends_on_    = digitalocean_droplet.node_instances\n  k3s_version    = \"latest\"\n  cluster_domain = \"do_k3s\"\n\n  drain_timeout            = \"60s\"\n  managed_fields           = [\"label\"]\n  generate_ca_certificates = true\n\n  global_flags = [for instance in digitalocean_droplet.node_instances : \"--tls-san ${instance.ipv4_address}\"]\n\n  servers = {\n    # The node name will be automatically provided by\n    # the module using the field name... any usage of\n    # --node-name in additional_flags will be ignored\n\n    for instance in digitalocean_droplet.node_instances :\n    instance.name => {\n      ip = instance.ipv4_address_private\n      connection = {\n        timeout     = \"60s\"\n        type        = \"ssh\"\n        host        = instance.ipv4_address\n        private_key = trimspace(tls_private_key.ed25519_provisioning.private_key_pem)\n      }\n\n      labels = { \"node.kubernetes.io/type\" = \"master\" }\n    }\n  }\n}\n"
  },
  {
    "path": "examples/do-k3s/main.tf",
    "content": "data \"digitalocean_image\" \"ubuntu\" {\n  slug = \"ubuntu-22-04-x64\"\n}\n\nresource \"tls_private_key\" \"ed25519_provisioning\" {\n  algorithm = \"ED25519\"\n}\n\nresource \"digitalocean_ssh_key\" \"default\" {\n  name       = \"K3S terraform module - Provisionning SSH key\"\n  public_key = trimspace(tls_private_key.ed25519_provisioning.public_key_openssh)\n}\n\nresource \"digitalocean_droplet\" \"node_instances\" {\n  count = 3\n\n  image    = data.digitalocean_image.ubuntu.slug\n  name     = \"k3s-node-${count.index}\"\n  region   = \"ams3\"\n  size     = \"s-1vcpu-2gb\"\n  ssh_keys = [digitalocean_ssh_key.default.fingerprint]\n}\n"
  },
  {
    "path": "examples/do-k3s/outputs.tf",
    "content": "output \"summary\" {\n  value = module.k3s.summary\n}\n\noutput \"kubeconfig\" {\n  value     = module.k3s.kube_config\n  sensitive = true\n}\n\noutput \"ssh_private_key\" {\n  description = \"Generated SSH private key.\"\n  value       = tls_private_key.ed25519_provisioning.private_key_openssh\n  sensitive   = true\n}\n"
  },
  {
    "path": "examples/do-k3s/versions.tf",
    "content": "terraform {\n  required_providers {\n    digitalocean = {\n      source  = \"digitalocean/digitalocean\"\n      version = \"2.31.0\"\n    }\n  }\n  required_version = \"~> 1.0\"\n}\n"
  },
  {
    "path": "examples/hcloud-k3s/README.md",
    "content": "#  K3S example for Hetzner-Cloud\n\nConfiguration in this directory creates a k3s cluster resources including network, subnet and instances.\n\n## Usage\n\n> [!warning]\n> **Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.**\n\n```bash\n$ export HCLOUD_TOKEN=...\n$ terraform init\n$ terraform apply\n```\n\n## How to connect to a node ?\n\n```bash\nterraform output -raw ssh_private_key | ssh-add -\nssh root@NODE-IP\n```\n"
  },
  {
    "path": "examples/hcloud-k3s/k3s.tf",
    "content": "module \"k3s\" {\n  source = \"./../..\"\n\n  depends_on_    = hcloud_server.agents\n  k3s_version    = \"latest\"\n  cluster_domain = \"cluster.local\"\n  cidr = {\n    pods     = \"10.42.0.0/16\"\n    services = \"10.43.0.0/16\"\n  }\n  drain_timeout  = \"30s\"\n  managed_fields = [\"label\", \"taint\"] // ignore annotations\n\n  global_flags = [\n    \"--flannel-iface ens10\",\n    \"--kubelet-arg cloud-provider=external\" // required to use https://github.com/hetznercloud/hcloud-cloud-controller-manager\n  ]\n\n  servers = {\n    for i in range(length(hcloud_server.control_planes)) :\n    hcloud_server.control_planes[i].name => {\n      ip = hcloud_server_network.control_planes[i].ip\n      connection = {\n        host        = hcloud_server.control_planes[i].ipv4_address\n        private_key = trimspace(tls_private_key.ed25519_provisioning.private_key_pem)\n      }\n      flags = [\n        \"--disable-cloud-controller\",\n        \"--tls-san ${hcloud_server.control_planes[0].ipv4_address}\",\n      ]\n      annotations = { \"server_id\" : i } // theses annotations will not be managed by this module\n    }\n  }\n\n  agents = {\n    for i in range(length(hcloud_server.agents)) :\n    \"${hcloud_server.agents[i].name}_node\" => {\n      name = hcloud_server.agents[i].name\n      ip   = hcloud_server_network.agents_network[i].ip\n      connection = {\n        host        = hcloud_server.agents[i].ipv4_address\n        private_key = trimspace(tls_private_key.ed25519_provisioning.private_key_pem)\n      }\n\n      labels = { \"node.kubernetes.io/pool\" = hcloud_server.agents[i].labels.nodepool }\n      taints = { \"dedicated\" : hcloud_server.agents[i].labels.nodepool == \"gpu\" ? \"gpu:NoSchedule\" : null }\n    }\n  }\n}\n"
  },
  {
    "path": "examples/hcloud-k3s/main.tf",
    "content": "data \"hcloud_image\" \"ubuntu\" {\n  name = \"ubuntu-20.04\"\n}\n\nresource \"tls_private_key\" \"ed25519_provisioning\" {\n  algorithm = \"ED25519\"\n}\n\nresource \"hcloud_ssh_key\" \"default\" {\n  name       = \"K3S terraform module - Provisionning SSH key\"\n  public_key = trimspace(tls_private_key.ed25519_provisioning.public_key_openssh)\n}\n\nresource \"hcloud_network\" \"k3s\" {\n  name     = \"k3s-network\"\n  ip_range = \"10.0.0.0/8\"\n}\n\nresource \"hcloud_network_subnet\" \"k3s_nodes\" {\n  type         = \"server\"\n  network_id   = hcloud_network.k3s.id\n  network_zone = \"eu-central\"\n  ip_range     = \"10.254.1.0/24\"\n}\n\nresource \"hcloud_server_network\" \"control_planes\" {\n  count     = var.servers_num\n  subnet_id = hcloud_network_subnet.k3s_nodes.id\n  server_id = hcloud_server.control_planes[count.index].id\n  ip        = cidrhost(hcloud_network_subnet.k3s_nodes.ip_range, 1 + count.index)\n}\n\nresource \"hcloud_server_network\" \"agents_network\" {\n  count     = length(hcloud_server.agents)\n  server_id = hcloud_server.agents[count.index].id\n  subnet_id = hcloud_network_subnet.k3s_nodes.id\n  ip        = cidrhost(hcloud_network_subnet.k3s_nodes.ip_range, 1 + var.servers_num + count.index)\n}\n\nresource \"hcloud_server\" \"control_planes\" {\n  count = var.servers_num\n  name  = \"k3s-control-plane-${count.index}\"\n\n  image       = data.hcloud_image.ubuntu.name\n  server_type = \"cx11\"\n\n  ssh_keys = [hcloud_ssh_key.default.id]\n  labels = {\n    provisioner = \"terraform\",\n    engine      = \"k3s\",\n    node_type   = \"control-plane\"\n  }\n}\n\n\nresource \"hcloud_server\" \"agents\" {\n  count = var.agents_num\n  name  = \"k3s-agent-${count.index}\"\n\n  image       = data.hcloud_image.ubuntu.name\n  server_type = \"cx11\"\n\n  ssh_keys = [hcloud_ssh_key.default.id]\n  labels = {\n    provisioner = \"terraform\",\n    engine      = \"k3s\",\n    node_type   = \"agent\",\n    nodepool    = count.index % 3 == 
0 ? \"gpu\" : \"general\",\n  }\n}"
  },
  {
    "path": "examples/hcloud-k3s/outputs.tf",
    "content": "output \"summary\" {\n  value = module.k3s.summary\n}\n\noutput \"kubeconfig\" {\n  value     = module.k3s.kube_config\n  sensitive = true\n}\n\noutput \"ssh_private_key\" {\n  description = \"Generated SSH private key.\"\n  value       = tls_private_key.ed25519_provisioning.private_key_openssh\n  sensitive   = true\n}\n"
  },
  {
    "path": "examples/hcloud-k3s/variables.tf",
    "content": "variable \"servers_num\" {\n  description = \"Number of control plane nodes.\"\n  default     = 3\n}\n\nvariable \"agents_num\" {\n  description = \"Number of agent nodes.\"\n  default     = 3\n}"
  },
  {
    "path": "examples/hcloud-k3s/versions.tf",
    "content": "terraform {\n  required_providers {\n    hcloud = {\n      source  = \"hetznercloud/hcloud\"\n      version = \"1.44.1\"\n    }\n  }\n  required_version = \"~> 1.0\"\n}\n"
  },
  {
    "path": "k3s_certificates.tf",
    "content": "locals {\n  should_generate_certificates = var.generate_ca_certificates && length(var.kubernetes_certificates) == 0\n  certificates_names           = var.generate_ca_certificates ? [\"client-ca\", \"server-ca\", \"request-header-key-ca\"] : []\n  certificates_types           = { for s in local.certificates_names : index(local.certificates_names, s) => s }\n  certificates_by_type = { for s in local.certificates_names : s =>\n    tls_self_signed_cert.kubernetes_ca_certs[index(local.certificates_names, s)].cert_pem\n  }\n  certificates_files = flatten(\n    [\n      [for s in local.certificates_names :\n        flatten([\n          {\n            \"file_name\"    = \"${s}.key\"\n            \"file_content\" = tls_private_key.kubernetes_ca[index(local.certificates_names, s)].private_key_pem\n          },\n          {\n            \"file_name\"    = \"${s}.crt\"\n            \"file_content\" = tls_self_signed_cert.kubernetes_ca_certs[index(local.certificates_names, s)].cert_pem\n          }\n        ])\n      ]\n      , var.kubernetes_certificates\n    ]\n  )\n  cluster_ca_certificate = var.generate_ca_certificates ? local.certificates_by_type[\"server-ca\"] : null\n  client_certificate     = var.generate_ca_certificates ? tls_locally_signed_cert.master_user[0].cert_pem : null\n  client_key             = var.generate_ca_certificates ? tls_private_key.master_user[0].private_key_pem : null\n}\n\n# Keys\nresource \"tls_private_key\" \"kubernetes_ca\" {\n  count = var.generate_ca_certificates ? 
3 : 0\n\n  algorithm   = \"ECDSA\"\n  ecdsa_curve = \"P384\"\n}\n\n# certs\nresource \"tls_self_signed_cert\" \"kubernetes_ca_certs\" {\n  for_each = local.certificates_types\n\n  validity_period_hours = 876600 # 100 years\n  allowed_uses          = [\"digital_signature\", \"key_encipherment\", \"cert_signing\"]\n  private_key_pem       = tls_private_key.kubernetes_ca[each.key].private_key_pem\n  is_ca_certificate     = true\n\n  subject {\n    common_name = \"kubernetes-${each.value}\"\n  }\n}\n\n# master-login cert\nresource \"tls_private_key\" \"master_user\" {\n  count = var.generate_ca_certificates ? 1 : 0\n\n  algorithm   = \"ECDSA\"\n  ecdsa_curve = \"P384\"\n}\n\nresource \"tls_cert_request\" \"master_user\" {\n  count = var.generate_ca_certificates ? 1 : 0\n\n  private_key_pem = tls_private_key.master_user[0].private_key_pem\n\n  subject {\n    common_name  = \"master-user\"\n    organization = \"system:masters\"\n  }\n}\n\nresource \"tls_locally_signed_cert\" \"master_user\" {\n  count = var.generate_ca_certificates ? 1 : 0\n\n  cert_request_pem   = tls_cert_request.master_user[0].cert_request_pem\n  ca_private_key_pem = tls_private_key.kubernetes_ca[0].private_key_pem\n  ca_cert_pem        = tls_self_signed_cert.kubernetes_ca_certs[0].cert_pem\n\n  validity_period_hours = 876600\n\n  allowed_uses = [\n    \"key_encipherment\",\n    \"digital_signature\",\n    \"client_auth\"\n  ]\n}\n"
  },
  {
    "path": "k3s_version.tf",
    "content": "// Fetch the last version of k3s\ndata \"http\" \"k3s_version\" {\n  url = \"https://update.k3s.io/v1-release/channels\"\n}\n\n// Fetch the k3s installation script\ndata \"http\" \"k3s_installer\" {\n  url = \"https://raw.githubusercontent.com/rancher/k3s/${jsondecode(data.http.k3s_version.response_body).data[1].latest}/install.sh\"\n}\n\nlocals {\n  // Use the fetched version if 'lastest' is specified\n  k3s_version = var.k3s_version == \"latest\" ? jsondecode(data.http.k3s_version.response_body).data[1].latest : var.k3s_version\n}\n"
  },
  {
    "path": "main.tf",
    "content": "// Generate the k3s token used by all nodes to join the cluster\nresource \"random_password\" \"k3s_cluster_secret\" {\n  length  = 48\n  special = false\n}\n\nlocals {\n  managed_annotation_enabled = contains(var.managed_fields, \"annotation\")\n  managed_label_enabled      = contains(var.managed_fields, \"label\")\n  managed_taint_enabled      = contains(var.managed_fields, \"taint\")\n}\n\n// null_resource used as dependency agregation.\nresource \"null_resource\" \"kubernetes_ready\" {\n  depends_on = [\n    null_resource.servers_install, null_resource.servers_drain, null_resource.servers_annotation, null_resource.servers_label, null_resource.servers_taint,\n    null_resource.agents_install, null_resource.agents_drain, null_resource.agents_annotation, null_resource.agents_label, null_resource.agents_taint,\n  ]\n}\n"
  },
  {
    "path": "outputs.tf",
    "content": "output \"kubernetes\" {\n  description = \"Authentication credentials of Kubernetes (full administrator).\"\n  value = {\n    cluster_ca_certificate = local.cluster_ca_certificate\n    client_certificate     = local.client_certificate\n    client_key             = local.client_key\n    api_endpoint           = \"https://${local.root_server_connection.host}:6443\"\n    password               = null\n    username               = null\n  }\n  sensitive = true\n}\n\noutput \"kube_config\" {\n  description = \"Genereated kubeconfig.\"\n  value = var.generate_ca_certificates == false ? null : yamlencode({\n    apiVersion = \"v1\"\n    clusters = [{\n      cluster = {\n        certificate-authority-data = base64encode(local.cluster_ca_certificate)\n        server                     = \"https://${local.root_server_connection.host}:6443\"\n      }\n      name = var.cluster_domain\n    }]\n    contexts = [{\n      context = {\n        cluster = var.cluster_domain\n        user : \"master-user\"\n      }\n      name = var.cluster_domain\n    }]\n    current-context = var.cluster_domain\n    kind            = \"Config\"\n    preferences     = {}\n    users = [{\n      user = {\n        client-certificate-data : base64encode(local.client_certificate)\n        client-key-data : base64encode(local.client_key)\n      }\n      name : \"master-user\"\n    }]\n  })\n  sensitive = true\n}\n\noutput \"summary\" {\n  description = \"Current state of k3s (version & nodes).\"\n  value = {\n    version : local.k3s_version\n    servers : [\n      for key, server in var.servers :\n      {\n        name        = local.servers_metadata[key].name\n        annotations = try(server.annotations, [])\n        labels      = try(server.labels, [])\n        taints      = try(server.taints, [])\n      }\n    ]\n    agents : [\n      for key, agent in var.agents :\n      {\n        name        = local.agents_metadata[key].name\n        annotations = try(agent.annotations, [])\n        
labels      = try(agent.labels, [])\n        taints      = try(agent.taints, [])\n      }\n    ]\n  }\n}\n\noutput \"kubernetes_ready\" {\n  description = \"Dependency endpoint to synchronize k3s installation and provisioning.\"\n  value       = null_resource.kubernetes_ready\n}\n\noutput \"kubernetes_cluster_secret\" {\n  description = \"Secret token used to join nodes to the cluster\"\n  value       = random_password.k3s_cluster_secret.result\n  sensitive   = true\n}\n"
  },
  {
    "path": "renovate.json",
    "content": "{\n  \"extends\": [\"config:base\"],\n  \"labels\": [\"kind/dependencies\"]\n}\n"
  },
  {
    "path": "server_nodes.tf",
    "content": "locals {\n  // Some vars use to easily access to the first k3s server values\n  root_server_name = keys(var.servers)[0]\n\n  // Get the first address from the IP array using comma's as the delimiter\n  root_advertise_ip = split(\",\", values(var.servers)[0].ip)[0]\n\n  // If root_advertise_ip is IPv6 wrap it in square brackets for IPv6 K3S URLs otherwise leave it raw\n  root_advertise_ip_k3s = can(regex(\"::\", local.root_advertise_ip)) ? \"[${local.root_advertise_ip}]\" : local.root_advertise_ip\n\n  // string representation of all specified extra k3s installation env vars\n  install_env_vars = join(\" \", [for k, v in var.k3s_install_env_vars : \"${k}=${v}\"])\n\n  root_server_connection = {\n    type = try(var.servers[local.root_server_name].connection.type, \"ssh\")\n\n    host     = try(var.servers[local.root_server_name].connection.host, var.servers[local.root_server_name].ip)\n    user     = try(var.servers[local.root_server_name].connection.user, null)\n    password = try(var.servers[local.root_server_name].connection.password, null)\n    port     = try(var.servers[local.root_server_name].connection.port, null)\n    timeout  = try(var.servers[local.root_server_name].connection.timeout, null)\n\n    script_path    = try(var.servers[local.root_server_name].connection.script_path, null)\n    private_key    = try(var.servers[local.root_server_name].connection.private_key, null)\n    certificate    = try(var.servers[local.root_server_name].connection.certificate, null)\n    agent          = try(var.servers[local.root_server_name].connection.agent, null)\n    agent_identity = try(var.servers[local.root_server_name].connection.agent_identity, null)\n    host_key       = try(var.servers[local.root_server_name].connection.host_key, null)\n\n    https    = try(var.servers[local.root_server_name].connection.https, null)\n    insecure = try(var.servers[local.root_server_name].connection.insecure, null)\n    use_ntlm = 
try(var.servers[local.root_server_name].connection.use_ntlm, null)\n    cacert   = try(var.servers[local.root_server_name].connection.cacert, null)\n\n    bastion_host        = try(var.servers[local.root_server_name].connection.bastion_host, null)\n    bastion_host_key    = try(var.servers[local.root_server_name].connection.bastion_host_key, null)\n    bastion_port        = try(var.servers[local.root_server_name].connection.bastion_port, null)\n    bastion_user        = try(var.servers[local.root_server_name].connection.bastion_user, null)\n    bastion_password    = try(var.servers[local.root_server_name].connection.bastion_password, null)\n    bastion_private_key = try(var.servers[local.root_server_name].connection.bastion_private_key, null)\n    bastion_certificate = try(var.servers[local.root_server_name].connection.bastion_certificate, null)\n  }\n\n  // Generate a map of all servers annotations in order to manage them through this module. This\n  // generation is made in two steps:\n  // - generate a list of objects representing all annotations, following this\n  //   'template' {key = node_name|annotation_name, value = annotation_value}\n  // - generate a map based on the generated list (using the field key as map key)\n  server_annotations_list = flatten([\n    for nk, nv in var.servers : [\n      // Because we need node name and annotation name when we remove the annotation resource, we need\n      // to share them through the annotation key (each.value are not avaible on destruction).\n      for ak, av in try(nv.annotations, {}) : av == null ? { key : \"\" } : { key : \"${nk}${var.separator}${ak}\", value : av }\n    ]\n  ])\n  server_annotations = local.managed_annotation_enabled ? { for o in local.server_annotations_list : o.key => o.value if o.key != \"\" } : {}\n\n  // Generate a map of all servers labels in order to manage them through this module. 
This\n  // generation is made in two steps, following the same process than annotation's map.\n  server_labels_list = flatten([\n    for nk, nv in var.servers : [\n      // Because we need node name and label name when we remove the label resource, we need\n      // to share them through the label key (each.value are not avaible on destruction).\n      for lk, lv in try(nv.labels, {}) : lv == null ? { key : \"\" } : { key : \"${nk}${var.separator}${lk}\", value : lv }\n    ]\n  ])\n  server_labels = local.managed_label_enabled ? { for o in local.server_labels_list : o.key => o.value if o.key != \"\" } : {}\n\n  // Generate a map of all servers taints in order to manage them through this module. This\n  // generation is made in two steps, following the same process than annotation's map.\n  server_taints_list = flatten([\n    for nk, nv in var.servers : [\n      // Because we need node name and taint name when we remove the taint resource, we need\n      // to share them through the taint key (each.value are not avaible on destruction).\n      for tk, tv in try(nv.taints, {}) : tv == null ? { key : \"\" } : { key : \"${nk}${var.separator}${tk}\", value : tv }\n    ]\n  ])\n  server_taints = local.managed_taint_enabled ? 
{ for o in local.server_taints_list : o.key => o.value if o.key != \"\" } : {}\n\n  // Generate a map of all calculated server fields, used during k3s installation.\n  servers_metadata = {\n    for key, server in var.servers :\n    key => {\n      name = try(server.name, key)\n      ip   = server.ip\n\n      flags = join(\" \", compact(concat(\n        key == local.root_server_name ?\n        // For the first server node, add all configuration flags\n        [\n          \"--advertise-address ${local.root_advertise_ip}\",\n          \"--node-ip ${server.ip}\",\n          \"--node-name '${try(server.name, key)}'\",\n          \"--cluster-domain '${var.cluster_domain}'\",\n          \"--cluster-cidr ${var.cidr.pods}\",\n          \"--service-cidr ${var.cidr.services}\",\n          \"--token ${nonsensitive(random_password.k3s_cluster_secret.result)}\", # NOTE: nonsensitive is used to show logs during provisioning\n          length(var.servers) > 1 ? \"--cluster-init\" : \"\",\n        ] :\n        // For other server nodes, use agent flags (because the first node manage the cluster configuration)\n        [\n          \"--node-ip ${server.ip}\",\n          \"--node-name '${try(server.name, key)}'\",\n          \"--server https://${local.root_advertise_ip_k3s}:6443\",\n          \"--cluster-domain '${var.cluster_domain}'\",\n          \"--cluster-cidr ${var.cidr.pods}\",\n          \"--service-cidr ${var.cidr.services}\",\n          \"--token ${nonsensitive(random_password.k3s_cluster_secret.result)}\", # NOTE: nonsensitive is used to show logs during provisioning\n        ],\n        var.global_flags,\n        try(server.flags, []),\n        [for key, value in try(server.taints, {}) : \"--node-taint '${key}=${value}'\" if value != null]\n      )))\n\n      immutable_fields_hash = sha1(join(\"\", concat(\n        [var.cluster_domain, var.cidr.pods, var.cidr.services],\n        var.global_flags,\n        try(server.flags, []),\n      )))\n    }\n  }\n}\n\n// Install k3s 
server\nresource \"null_resource\" \"k8s_ca_certificates_install\" {\n  count = length(local.certificates_files)\n\n  depends_on = [var.depends_on_]\n\n  connection {\n    type = try(local.root_server_connection.type, \"ssh\")\n\n    host     = try(local.root_server_connection.host, local.root_server_connection.ip)\n    user     = try(local.root_server_connection.user, null)\n    password = try(local.root_server_connection.password, null)\n    port     = try(local.root_server_connection.port, null)\n    timeout  = try(local.root_server_connection.timeout, null)\n\n    script_path    = try(local.root_server_connection.script_path, null)\n    private_key    = try(local.root_server_connection.private_key, null)\n    certificate    = try(local.root_server_connection.certificate, null)\n    agent          = try(local.root_server_connection.agent, null)\n    agent_identity = try(local.root_server_connection.agent_identity, null)\n    host_key       = try(local.root_server_connection.host_key, null)\n\n    https    = try(local.root_server_connection.https, null)\n    insecure = try(local.root_server_connection.insecure, null)\n    use_ntlm = try(local.root_server_connection.use_ntlm, null)\n    cacert   = try(local.root_server_connection.cacert, null)\n\n    bastion_host        = try(local.root_server_connection.bastion_host, null)\n    bastion_host_key    = try(local.root_server_connection.bastion_host_key, null)\n    bastion_port        = try(local.root_server_connection.bastion_port, null)\n    bastion_user        = try(local.root_server_connection.bastion_user, null)\n    bastion_password    = try(local.root_server_connection.bastion_password, null)\n    bastion_private_key = try(local.root_server_connection.bastion_private_key, null)\n    bastion_certificate = try(local.root_server_connection.bastion_certificate, null)\n  }\n\n  provisioner \"remote-exec\" {\n    inline = [\n      <<-EOT\n      # --- use sudo if we are not already root ---\n      [ $(id -u) -eq 0 ] 
|| exec sudo -n $0 $@\n\n      mkdir -p /var/lib/rancher/k3s/server/tls/\n      echo '${local.certificates_files[count.index].file_content}' > /var/lib/rancher/k3s/server/tls/${local.certificates_files[count.index].file_name}\n      EOT\n    ]\n  }\n}\n\nresource \"null_resource\" \"servers_install\" {\n  for_each = var.servers\n\n  depends_on = [var.depends_on_, null_resource.k8s_ca_certificates_install]\n  triggers = {\n    on_immutable_changes = local.servers_metadata[each.key].immutable_fields_hash\n    on_new_version       = local.k3s_version\n  }\n\n  connection {\n    type = try(each.value.connection.type, \"ssh\")\n\n    host     = try(each.value.connection.host, each.value.ip)\n    user     = try(each.value.connection.user, null)\n    password = try(each.value.connection.password, null)\n    port     = try(each.value.connection.port, null)\n    timeout  = try(each.value.connection.timeout, null)\n\n    script_path    = try(each.value.connection.script_path, null)\n    private_key    = try(each.value.connection.private_key, null)\n    certificate    = try(each.value.connection.certificate, null)\n    agent          = try(each.value.connection.agent, null)\n    agent_identity = try(each.value.connection.agent_identity, null)\n    host_key       = try(each.value.connection.host_key, null)\n\n    https    = try(each.value.connection.https, null)\n    insecure = try(each.value.connection.insecure, null)\n    use_ntlm = try(each.value.connection.use_ntlm, null)\n    cacert   = try(each.value.connection.cacert, null)\n\n    bastion_host        = try(each.value.connection.bastion_host, null)\n    bastion_host_key    = try(each.value.connection.bastion_host_key, null)\n    bastion_port        = try(each.value.connection.bastion_port, null)\n    bastion_user        = try(each.value.connection.bastion_user, null)\n    bastion_password    = try(each.value.connection.bastion_password, null)\n    bastion_private_key = try(each.value.connection.bastion_private_key, 
null)\n    bastion_certificate = try(each.value.connection.bastion_certificate, null)\n  }\n\n  // Upload k3s file\n  provisioner \"file\" {\n    content     = data.http.k3s_installer.response_body\n    destination = \"/tmp/k3s-installer\"\n  }\n\n  // Install k3s server\n  provisioner \"remote-exec\" {\n    inline = [\n      \"${local.install_env_vars} INSTALL_K3S_VERSION=${local.k3s_version} sh /tmp/k3s-installer server ${local.servers_metadata[each.key].flags}\",\n      \"until ${local.kubectl_cmd} get node ${local.servers_metadata[each.key].name}; do sleep 1; done\"\n    ]\n  }\n}\n\n// Drain k3s node on destruction in order to safely move all workflows to another node.\nresource \"null_resource\" \"servers_drain\" {\n  for_each = var.servers\n\n  depends_on = [null_resource.servers_install]\n  triggers = {\n    server_name     = local.servers_metadata[split(var.separator, each.key)[0]].name\n    connection_json = base64encode(jsonencode(local.root_server_connection))\n    drain_timeout   = var.drain_timeout\n    kubectl_cmd     = local.kubectl_cmd\n  }\n  lifecycle { ignore_changes = [triggers] }\n\n  connection {\n    type = jsondecode(base64decode(self.triggers.connection_json)).type\n\n    host     = jsondecode(base64decode(self.triggers.connection_json)).host\n    user     = jsondecode(base64decode(self.triggers.connection_json)).user\n    password = jsondecode(base64decode(self.triggers.connection_json)).password\n    port     = jsondecode(base64decode(self.triggers.connection_json)).port\n    timeout  = jsondecode(base64decode(self.triggers.connection_json)).timeout\n\n    script_path    = jsondecode(base64decode(self.triggers.connection_json)).script_path\n    private_key    = jsondecode(base64decode(self.triggers.connection_json)).private_key\n    certificate    = jsondecode(base64decode(self.triggers.connection_json)).certificate\n    agent          = jsondecode(base64decode(self.triggers.connection_json)).agent\n    agent_identity = 
jsondecode(base64decode(self.triggers.connection_json)).agent_identity\n    host_key       = jsondecode(base64decode(self.triggers.connection_json)).host_key\n\n    https    = jsondecode(base64decode(self.triggers.connection_json)).https\n    insecure = jsondecode(base64decode(self.triggers.connection_json)).insecure\n    use_ntlm = jsondecode(base64decode(self.triggers.connection_json)).use_ntlm\n    cacert   = jsondecode(base64decode(self.triggers.connection_json)).cacert\n\n    bastion_host        = jsondecode(base64decode(self.triggers.connection_json)).bastion_host\n    bastion_host_key    = jsondecode(base64decode(self.triggers.connection_json)).bastion_host_key\n    bastion_port        = jsondecode(base64decode(self.triggers.connection_json)).bastion_port\n    bastion_user        = jsondecode(base64decode(self.triggers.connection_json)).bastion_user\n    bastion_password    = jsondecode(base64decode(self.triggers.connection_json)).bastion_password\n    bastion_private_key = jsondecode(base64decode(self.triggers.connection_json)).bastion_private_key\n    bastion_certificate = jsondecode(base64decode(self.triggers.connection_json)).bastion_certificate\n  }\n\n  provisioner \"remote-exec\" {\n    when = destroy\n    inline = [\n      \"${self.triggers.kubectl_cmd} drain ${self.triggers.server_name} --delete-local-data --force --ignore-daemonsets --timeout=${self.triggers.drain_timeout}\"\n    ]\n  }\n}\n\n// Add/remove manually annotation on k3s server\nresource \"null_resource\" \"servers_annotation\" {\n  for_each = local.server_annotations\n\n  depends_on = [null_resource.servers_install]\n  triggers = {\n    server_name      = local.servers_metadata[split(var.separator, each.key)[0]].name\n    annotation_name  = split(var.separator, each.key)[1]\n    on_value_changes = each.value\n\n    connection_json = base64encode(jsonencode(local.root_server_connection))\n    kubectl_cmd     = local.kubectl_cmd\n  }\n  lifecycle { ignore_changes = 
[triggers[\"connection_json\"], triggers[\"kubectl_cmd\"]] }\n\n  connection {\n    type = jsondecode(base64decode(self.triggers.connection_json)).type\n\n    host     = jsondecode(base64decode(self.triggers.connection_json)).host\n    user     = jsondecode(base64decode(self.triggers.connection_json)).user\n    password = jsondecode(base64decode(self.triggers.connection_json)).password\n    port     = jsondecode(base64decode(self.triggers.connection_json)).port\n    timeout  = jsondecode(base64decode(self.triggers.connection_json)).timeout\n\n    script_path    = jsondecode(base64decode(self.triggers.connection_json)).script_path\n    private_key    = jsondecode(base64decode(self.triggers.connection_json)).private_key\n    certificate    = jsondecode(base64decode(self.triggers.connection_json)).certificate\n    agent          = jsondecode(base64decode(self.triggers.connection_json)).agent\n    agent_identity = jsondecode(base64decode(self.triggers.connection_json)).agent_identity\n    host_key       = jsondecode(base64decode(self.triggers.connection_json)).host_key\n\n    https    = jsondecode(base64decode(self.triggers.connection_json)).https\n    insecure = jsondecode(base64decode(self.triggers.connection_json)).insecure\n    use_ntlm = jsondecode(base64decode(self.triggers.connection_json)).use_ntlm\n    cacert   = jsondecode(base64decode(self.triggers.connection_json)).cacert\n\n    bastion_host        = jsondecode(base64decode(self.triggers.connection_json)).bastion_host\n    bastion_host_key    = jsondecode(base64decode(self.triggers.connection_json)).bastion_host_key\n    bastion_port        = jsondecode(base64decode(self.triggers.connection_json)).bastion_port\n    bastion_user        = jsondecode(base64decode(self.triggers.connection_json)).bastion_user\n    bastion_password    = jsondecode(base64decode(self.triggers.connection_json)).bastion_password\n    bastion_private_key = jsondecode(base64decode(self.triggers.connection_json)).bastion_private_key\n   
 bastion_certificate = jsondecode(base64decode(self.triggers.connection_json)).bastion_certificate\n  }\n\n  provisioner \"remote-exec\" {\n    inline = [\n      \"${self.triggers.kubectl_cmd} annotate --overwrite node ${self.triggers.server_name} ${self.triggers.annotation_name}=${self.triggers.on_value_changes}\"\n    ]\n  }\n\n  provisioner \"remote-exec\" {\n    when = destroy\n    inline = [\n      \"${self.triggers.kubectl_cmd} annotate node ${self.triggers.server_name} ${self.triggers.annotation_name}-\"\n    ]\n  }\n}\n\n// Add/remove manually label on k3s server\nresource \"null_resource\" \"servers_label\" {\n  for_each = local.server_labels\n\n  depends_on = [null_resource.servers_install]\n  triggers = {\n    server_name      = local.servers_metadata[split(var.separator, each.key)[0]].name\n    label_name       = split(var.separator, each.key)[1]\n    on_value_changes = each.value\n\n    connection_json = base64encode(jsonencode(local.root_server_connection))\n    kubectl_cmd     = local.kubectl_cmd\n  }\n  lifecycle { ignore_changes = [triggers[\"connection_json\"], triggers[\"kubectl_cmd\"]] }\n\n  connection {\n    type = jsondecode(base64decode(self.triggers.connection_json)).type\n\n    host     = jsondecode(base64decode(self.triggers.connection_json)).host\n    user     = jsondecode(base64decode(self.triggers.connection_json)).user\n    password = jsondecode(base64decode(self.triggers.connection_json)).password\n    port     = jsondecode(base64decode(self.triggers.connection_json)).port\n    timeout  = jsondecode(base64decode(self.triggers.connection_json)).timeout\n\n    script_path    = jsondecode(base64decode(self.triggers.connection_json)).script_path\n    private_key    = jsondecode(base64decode(self.triggers.connection_json)).private_key\n    certificate    = jsondecode(base64decode(self.triggers.connection_json)).certificate\n    agent          = jsondecode(base64decode(self.triggers.connection_json)).agent\n    agent_identity = 
jsondecode(base64decode(self.triggers.connection_json)).agent_identity\n    host_key       = jsondecode(base64decode(self.triggers.connection_json)).host_key\n\n    https    = jsondecode(base64decode(self.triggers.connection_json)).https\n    insecure = jsondecode(base64decode(self.triggers.connection_json)).insecure\n    use_ntlm = jsondecode(base64decode(self.triggers.connection_json)).use_ntlm\n    cacert   = jsondecode(base64decode(self.triggers.connection_json)).cacert\n\n    bastion_host        = jsondecode(base64decode(self.triggers.connection_json)).bastion_host\n    bastion_host_key    = jsondecode(base64decode(self.triggers.connection_json)).bastion_host_key\n    bastion_port        = jsondecode(base64decode(self.triggers.connection_json)).bastion_port\n    bastion_user        = jsondecode(base64decode(self.triggers.connection_json)).bastion_user\n    bastion_password    = jsondecode(base64decode(self.triggers.connection_json)).bastion_password\n    bastion_private_key = jsondecode(base64decode(self.triggers.connection_json)).bastion_private_key\n    bastion_certificate = jsondecode(base64decode(self.triggers.connection_json)).bastion_certificate\n  }\n\n  provisioner \"remote-exec\" {\n    inline = [\n      \"${self.triggers.kubectl_cmd} label --overwrite node ${self.triggers.server_name} ${self.triggers.label_name}=${self.triggers.on_value_changes}\"\n    ]\n  }\n\n  provisioner \"remote-exec\" {\n    when = destroy\n    inline = [\n      \"${self.triggers.kubectl_cmd} label node ${self.triggers.server_name} ${self.triggers.label_name}-\"\n    ]\n  }\n}\n\n// Add/remove manually taint on k3s server\nresource \"null_resource\" \"servers_taint\" {\n  for_each = local.server_taints\n\n  depends_on = [null_resource.servers_install]\n  triggers = {\n    server_name      = local.servers_metadata[split(var.separator, each.key)[0]].name\n    taint_name       = split(var.separator, each.key)[1]\n    connection_json  = 
base64encode(jsonencode(local.root_server_connection))\n    on_value_changes = each.value\n\n    kubectl_cmd     = local.kubectl_cmd\n  }\n  lifecycle { ignore_changes = [triggers[\"connection_json\"], triggers[\"kubectl_cmd\"]] }\n\n  connection {\n    type = jsondecode(base64decode(self.triggers.connection_json)).type\n\n    host     = jsondecode(base64decode(self.triggers.connection_json)).host\n    user     = jsondecode(base64decode(self.triggers.connection_json)).user\n    password = jsondecode(base64decode(self.triggers.connection_json)).password\n    port     = jsondecode(base64decode(self.triggers.connection_json)).port\n    timeout  = jsondecode(base64decode(self.triggers.connection_json)).timeout\n\n    script_path    = jsondecode(base64decode(self.triggers.connection_json)).script_path\n    private_key    = jsondecode(base64decode(self.triggers.connection_json)).private_key\n    certificate    = jsondecode(base64decode(self.triggers.connection_json)).certificate\n    agent          = jsondecode(base64decode(self.triggers.connection_json)).agent\n    agent_identity = jsondecode(base64decode(self.triggers.connection_json)).agent_identity\n    host_key       = jsondecode(base64decode(self.triggers.connection_json)).host_key\n\n    https    = jsondecode(base64decode(self.triggers.connection_json)).https\n    insecure = jsondecode(base64decode(self.triggers.connection_json)).insecure\n    use_ntlm = jsondecode(base64decode(self.triggers.connection_json)).use_ntlm\n    cacert   = jsondecode(base64decode(self.triggers.connection_json)).cacert\n\n    bastion_host        = jsondecode(base64decode(self.triggers.connection_json)).bastion_host\n    bastion_host_key    = jsondecode(base64decode(self.triggers.connection_json)).bastion_host_key\n    bastion_port        = jsondecode(base64decode(self.triggers.connection_json)).bastion_port\n    bastion_user        = 
jsondecode(base64decode(self.triggers.connection_json)).bastion_user\n    bastion_password    = jsondecode(base64decode(self.triggers.connection_json)).bastion_password\n    bastion_private_key = jsondecode(base64decode(self.triggers.connection_json)).bastion_private_key\n    bastion_certificate = jsondecode(base64decode(self.triggers.connection_json)).bastion_certificate\n  }\n\n  provisioner \"remote-exec\" {\n    inline = [\n      \"${self.triggers.kubectl_cmd} taint node ${self.triggers.server_name} ${self.triggers.taint_name}=${self.triggers.on_value_changes} --overwrite\"\n    ]\n  }\n\n  provisioner \"remote-exec\" {\n    when = destroy\n    inline = [\n      \"${self.triggers.kubectl_cmd} taint node ${self.triggers.server_name} ${self.triggers.taint_name}-\"\n    ]\n  }\n}\n"
  },
  {
    "path": "variables.tf",
    "content": "variable \"depends_on_\" {\n  description = \"Resource dependency of this module.\"\n  default     = null\n}\n\nvariable \"k3s_version\" {\n  description = \"Specify the k3s version. You can choose from the following release channels or pin the version directly\"\n  type        = string\n  default     = \"latest\"\n}\n\nvariable \"k3s_install_env_vars\" {\n  description = \"map of environment variables that are passed to the k3s installation script (see https://docs.k3s.io/reference/env-variables)\"\n  type        = map(string)\n  default     = {}\n\n  validation {\n    condition     = !can(var.k3s_install_env_vars[\"INSTALL_K3S_VERSION\"])\n    error_message = \"Environment variable \\\"INSTALL_K3S_VERSION\\\" needs to be set via variable k3s_version\"\n  }\n}\n\nvariable \"name\" {\n  description = \"K3s cluster domain name (see https://rancher.com/docs/k3s/latest/en/installation/install-options/). This input is deprecated and will be removed in the next major release. Use `cluster_domain` instead.\"\n  type        = string\n  default     = \"!!!DEPRECATED!!!\"\n\n  validation {\n    condition     = var.name == \"!!!DEPRECATED!!!\"\n    error_message = \"Variable `name` is deprecated, use `cluster_domain` instead. It will be removed at the next major release.\"\n  }\n}\n\nvariable \"cluster_domain\" {\n  description = \"K3s cluster domain name (see https://rancher.com/docs/k3s/latest/en/installation/install-options/).\"\n  type        = string\n  default     = \"cluster.local\"\n}\n\nvariable \"generate_ca_certificates\" {\n  description = \"If true, this module will generate the CA certificates (see https://github.com/rancher/k3s/issues/1868#issuecomment-639690634). Otherwise rancher will generate it. This is required to generate kubeconfig\"\n  type        = bool\n  default     = true\n}\n\nvariable \"kubernetes_certificates\" {\n  description = \"A list of maps of certificate-name.[crt/key] : certificate-value to be copied to /var/lib/rancher/k3s/server/tls, if this option is used generate_ca_certificates will be treated as false\"\n  type = list(\n    object({\n      file_name    = string,\n      file_content = string\n    })\n  )\n  default = []\n}\n\nvariable \"cidr\" {\n  description = \"K3s network CIDRs (see https://rancher.com/docs/k3s/latest/en/installation/install-options/).\"\n  type = object({\n    pods     = string\n    services = string\n  })\n  default = {\n    pods     = \"10.42.0.0/16\"\n    services = \"10.43.0.0/16\"\n  }\n}\n\nvariable \"drain_timeout\" {\n  description = \"The length of time to wait before giving up the node draining. Infinite by default.\"\n  type        = string\n  default     = \"0s\"\n}\n\nvariable \"global_flags\" {\n  description = \"Add additional installation flags, used by all nodes (see https://rancher.com/docs/k3s/latest/en/installation/install-options/).\"\n  type        = list(string)\n  default     = []\n}\n\nvariable \"servers\" {\n  description = \"K3s server nodes definition. The key is used as node name if no name is provided.\"\n  type        = map(any)\n\n  validation {\n    condition     = length(var.servers) > 0\n    error_message = \"At least one server node must be provided.\"\n  }\n  validation {\n    condition     = length(var.servers) % 2 == 1\n    error_message = \"Servers must have an odd number of nodes.\"\n  }\n  validation {\n    condition     = can(values(var.servers)[*].ip)\n    error_message = \"Field servers.<name>.ip is required.\"\n  }\n  validation {\n    condition     = !can(values(var.servers)[*].connection) || !contains([for v in var.servers : can(tomap(v.connection))], false)\n    error_message = \"Field servers.<name>.connection must be a valid Terraform connection.\"\n  }\n  validation {\n    condition     = !can(values(var.servers)[*].flags) || !contains([for v in var.servers : can(tolist(v.flags))], false)\n    error_message = \"Field servers.<name>.flags must be a list of string (see: https://docs.k3s.io/cli/server).\"\n  }\n  validation {\n    condition     = !can(values(var.servers)[*].annotations) || !contains([for v in var.servers : can(tomap(v.annotations))], false)\n    error_message = \"Field servers.<name>.annotations must be a map of string.\"\n  }\n  validation {\n    condition     = !can(values(var.servers)[*].labels) || !contains([for v in var.servers : can(tomap(v.labels))], false)\n    error_message = \"Field servers.<name>.labels must be a map of string.\"\n  }\n  validation {\n    condition     = !can(values(var.servers)[*].taints) || !contains([for v in var.servers : can(tomap(v.taints))], false)\n    error_message = \"Field servers.<name>.taints must be a map of string.\"\n  }\n}\n\nvariable \"agents\" {\n  description = \"K3s agent nodes definitions. The key is used as node name if no name is provided.\"\n  type        = map(any)\n  default     = {}\n\n  validation {\n    condition     = can(values(var.agents)[*].ip)\n    error_message = \"Field agents.<name>.ip is required.\"\n  }\n  validation {\n    condition     = !can(values(var.agents)[*].connection) || !contains([for v in var.agents : can(tomap(v.connection))], false)\n    error_message = \"Field agents.<name>.connection must be a valid Terraform connection.\"\n  }\n  validation {\n    condition     = !can(values(var.agents)[*].flags) || !contains([for v in var.agents : can(tolist(v.flags))], false)\n    error_message = \"Field agents.<name>.flags must be a list of string (see: https://docs.k3s.io/cli/agent).\"\n  }\n  validation {\n    condition     = !can(values(var.agents)[*].annotations) || !contains([for v in var.agents : can(tomap(v.annotations))], false)\n    error_message = \"Field agents.<name>.annotations must be a map of string.\"\n  }\n  validation {\n    condition     = !can(values(var.agents)[*].labels) || !contains([for v in var.agents : can(tomap(v.labels))], false)\n    error_message = \"Field agents.<name>.labels must be a map of string.\"\n  }\n  validation {\n    condition     = !can(values(var.agents)[*].taints) || !contains([for v in var.agents : can(tomap(v.taints))], false)\n    error_message = \"Field agents.<name>.taints must be a map of string.\"\n  }\n}\n\nvariable \"managed_fields\" {\n  description = \"List of fields which must be managed by this module (can be annotation, label and/or taint).\"\n  type        = list(string)\n  default     = [\"annotation\", \"label\", \"taint\"]\n}\n\nvariable \"separator\" {\n  description = \"Separator used to separate node name and field name (used to manage annotations, labels and taints).\"\n  default     = \"|\"\n}\n\nvariable \"use_sudo\" {\n  description = \"Whether or not to use kubectl with sudo during cluster setup.\"\n  default     = false\n  type        = bool\n}\n"
  },
  {
    "path": "versions.tf",
    "content": "terraform {\n  required_providers {\n    http = {\n      source  = \"hashicorp/http\"\n      version = \"~> 3.0\"\n    }\n    null = {\n      source  = \"hashicorp/null\"\n      version = \"~> 3.0\"\n    }\n    random = {\n      source  = \"hashicorp/random\"\n      version = \"~> 3.0\"\n    }\n    tls = {\n      source  = \"hashicorp/tls\"\n      version = \"~> 4.0\"\n    }\n  }\n\n  required_version = \"~> 1.0\"\n}\n"
  }
]